source | python |
|---|---|
test_web.py | # test_web.py -- Compatibility tests for the git web server.
# Copyright (C) 2010 Google, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Compatibility tests between Dulwich and the cgit HTTP server.
warning: these tests should be fairly stable, but when writing/debugging new
tests, deadlocks may freeze the test process such that it cannot be
Ctrl-C'ed. On POSIX systems, you can kill the tests with Ctrl-Z, "kill %".
"""
import threading
from unittest import (
SkipTest,
)
from wsgiref import simple_server
from dulwich.server import (
DictBackend,
)
from dulwich.web import (
make_wsgi_chain,
HTTPGitApplication,
WSGIRequestHandlerLogger,
WSGIServerLogger,
)
from dulwich.tests.compat.server_utils import (
ServerTests,
NoSideBand64kReceivePackHandler,
)
from dulwich.tests.compat.utils import (
CompatTestCase,
)
class WebTests(ServerTests):
"""Base tests for web server tests.
Contains utility and setUp/tearDown methods, but does not inherit from
TestCase so tests are not automatically run.
"""
protocol = 'http'
def _start_server(self, repo):
backend = DictBackend({'/': repo})
app = self._make_app(backend)
dul_server = simple_server.make_server(
'localhost', 0, app, server_class=WSGIServerLogger,
handler_class=WSGIRequestHandlerLogger)
self.addCleanup(dul_server.shutdown)
self.addCleanup(dul_server.server_close)
threading.Thread(target=dul_server.serve_forever).start()
self._server = dul_server
_, port = dul_server.socket.getsockname()
return port
class SmartWebTestCase(WebTests, CompatTestCase):
"""Test cases for smart HTTP server.
This server test case does not use side-band-64k in git-receive-pack.
"""
min_git_version = (1, 6, 6)
def _handlers(self):
return {'git-receive-pack': NoSideBand64kReceivePackHandler}
def _check_app(self, app):
receive_pack_handler_cls = app.handlers['git-receive-pack']
caps = receive_pack_handler_cls.capabilities()
self.assertFalse('side-band-64k' in caps)
def _make_app(self, backend):
app = make_wsgi_chain(backend, handlers=self._handlers())
to_check = app
# peel back layers until we're at the base application
while not issubclass(to_check.__class__, HTTPGitApplication):
to_check = to_check.app
self._check_app(to_check)
return app
class SmartWebSideBand64kTestCase(SmartWebTestCase):
"""Test cases for smart HTTP server with side-band-64k support."""
# side-band-64k in git-receive-pack was introduced in git 1.7.0.2
min_git_version = (1, 7, 0, 2)
def _handlers(self):
return None # default handlers include side-band-64k
def _check_app(self, app):
receive_pack_handler_cls = app.handlers['git-receive-pack']
caps = receive_pack_handler_cls.capabilities()
self.assertTrue('side-band-64k' in caps)
class DumbWebTestCase(WebTests, CompatTestCase):
"""Test cases for dumb HTTP server."""
def _make_app(self, backend):
return make_wsgi_chain(backend, dumb=True)
def test_push_to_dulwich(self):
# Note: remove this if dulwich implements dumb web pushing.
raise SkipTest('Dumb web pushing not supported.')
def test_push_to_dulwich_remove_branch(self):
# Note: remove this if dumb pushing is supported
raise SkipTest('Dumb web pushing not supported.')
def test_new_shallow_clone_from_dulwich(self):
# Note: remove this if C git and dulwich implement dumb web shallow
# clones.
raise SkipTest('Dumb web shallow cloning not supported.')
def test_fetch_same_depth_into_shallow_clone_from_dulwich(self):
# Note: remove this if C git and dulwich implement dumb web shallow
# clones.
raise SkipTest('Dumb web shallow cloning not supported.')
def test_fetch_full_depth_into_shallow_clone_from_dulwich(self):
# Note: remove this if C git and dulwich implement dumb web shallow
# clones.
raise SkipTest('Dumb web shallow cloning not supported.')
|
manual_performance.py | #!/usr/bin/env python
import angr
import argparse
import sys
import time
import os
import math
import random
import resource
import multiprocessing
from tabulate import tabulate
from os.path import join, dirname, realpath
from progressbar import ProgressBar, Percentage, Bar
test_location = str(join(dirname(realpath(__file__)), '..', '..', 'binaries', 'tests'))
class Timer(object):
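"""Context manager that measures wall-clock time; the elapsed time in milliseconds is stored in ``self.msecs``."""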
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.msecs = (self.end - self.start) * 1000
def mean(r):
return 0. if not r else float(sum(r)) / len(r)
def std(r):
average = mean(r)
return math.sqrt(float(sum(pow(x - average, 2) for x in r)) / len(r))
def print_results(tests):
table_runs = []
table_mems = []
for name, test in tests.items():
runs = test['runs']
table_runs.append([name, str(min(runs)), str(max(runs)), str(mean(runs)), str(std(runs))])
for name, test in tests.items():
mems = test['mems']
table_mems.append([name, str(min(mems)), str(max(mems)), str(mean(mems)), str(std(mems))])
header = ['name', 'min', 'max', 'avg', 'std']
print('Timing (in milliseconds)')
print(tabulate(table_runs, headers=header))
print('Maximum RAM usage (in MB)')
print(tabulate(table_mems, headers=header))
def run_counter(path):
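"""Load the binary at ``path`` and step its simulation manager 500 times."""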
p = angr.Project(path)
sm = p.factory.simulation_manager()
sm.run(n=500)
def run_cfg_analysis(path):
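"""Build an emulated CFG of the binary at ``path`` without auto-loading shared libraries."""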
load_options = {}
load_options['auto_load_libs'] = False
p = angr.Project(path,
load_options=load_options,
translation_cache=True
)
p.analyses.CFGEmulated()
def time_one(args, test, queue):
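"""Run one test in this process and report elapsed milliseconds and peak RSS (in MB) through ``queue``."""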
filepath = test['filepath']
func = test['test_func']
random.seed(args.seed)
with Timer() as t:
func(filepath)
queue.put(t.msecs)
queue.put(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000.0)
parser = argparse.ArgumentParser(description='angr performance tests')
parser.add_argument(
'-n', '--n-runs', default=100, type=int,
help='How many runs to perform for each test (default: 100)')
parser.add_argument(
'-s', '--seed', default=1234, type=int,
help='Seed for random (default: 1234)')
args = parser.parse_args()
tests = {
'fauxware_cfg_i386': {
'filepath': join(test_location, 'i386', 'fauxware'),
'test_func': run_cfg_analysis
}
}
# Add counter tests
arch_counter = [
'i386',
'armel',
'armhf',
'mips',
'mipsel',
'ppc',
'ppc64',
'x86_64',
]
for arch in arch_counter:
tests['counter_' + arch] = {
'filepath': join(test_location, arch, 'counter'),
'test_func': run_counter
}
print('Seed: ' + str(args.seed))
print('N runs: ' + str(args.n_runs))
queue = multiprocessing.Queue()
for test in tests:
runs = []
mems = []
widgets = ['',
Percentage(), ' ',
Bar()
]
print(test)
pbar = ProgressBar(maxval=args.n_runs, widgets=widgets).start()
for i in range(0, args.n_runs):
p = multiprocessing.Process(target=time_one, args=(args, tests[test], queue))
p.start()
p.join()
runs.append(queue.get())
mems.append(queue.get())
pbar.update(i + 1)
print('')
tests[test]['runs'] = runs
tests[test]['mems'] = mems
print_results(tests)
|
t265_to_mavlink.py | #!/usr/bin/env python3
#####################################################
## librealsense T265 to MAVLink ##
#####################################################
# This script assumes the pyrealsense2.[].so file is found in the same directory as this script
# Install required packages:
# pip3 install pyrealsense2
# pip3 install transformations
# pip3 install pymavlink
# pip3 install apscheduler
# pip3 install pyserial
# Set the path for IDLE
import sys
sys.path.append("/usr/local/lib/")
# Set MAVLink protocol to 2.
import os
os.environ["MAVLINK20"] = "1"
# Import the libraries
import pyrealsense2 as rs
import numpy as np
import transformations as tf
import math as m
import time
import argparse
import threading
import signal
from time import sleep
from apscheduler.schedulers.background import BackgroundScheduler
from dronekit import connect, VehicleMode
from pymavlink import mavutil
# Replacement of the standard print() function to flush the output
def progress(string):
print(string, file=sys.stdout)
sys.stdout.flush()
#######################################
# Parameters
#######################################
# Default configurations for connection to the FCU
connection_string_default = '/dev/ttyUSB0'
connection_baudrate_default = 921600
connection_timeout_sec_default = 5
# Transformation to convert different camera orientations to the NED convention. Change camera_orientation_default to match your configuration.
# 0: Forward, USB port to the right
# 1: Downfacing, USB port to the right
# 2: Forward, 45 degree tilted down
# Important note for a downfacing camera: tilt the vehicle's nose up a little (not flat) before you run the script, otherwise the initial yaw will be randomized; read here for more details: https://github.com/IntelRealSense/librealsense/issues/4080. Tilting the vehicle to any other side may make the yaw less stable.
camera_orientation_default = 0
# https://mavlink.io/en/messages/common.html#VISION_POSITION_ESTIMATE
enable_msg_vision_position_estimate = True
vision_position_estimate_msg_hz_default = 30.0
# https://mavlink.io/en/messages/ardupilotmega.html#VISION_POSITION_DELTA
enable_msg_vision_position_delta = False
vision_position_delta_msg_hz_default = 30.0
# https://mavlink.io/en/messages/common.html#VISION_SPEED_ESTIMATE
enable_msg_vision_speed_estimate = True
vision_speed_estimate_msg_hz_default = 30.0
# https://mavlink.io/en/messages/common.html#STATUSTEXT
enable_update_tracking_confidence_to_gcs = True
update_tracking_confidence_to_gcs_hz_default = 1.0
# Monitor the user's keyboard input; can only be used when the script runs from a terminal
enable_user_keyboard_input = False
# Default global position for EKF home/origin
enable_auto_set_ekf_home = False
home_lat = 151269321 # Somewhere random
home_lon = 16624301 # Somewhere random
home_alt = 163000 # Somewhere random
# TODO: Taken care of by ArduPilot, so can be removed (once the handling on AP side is confirmed stable)
# In NED frame, offset from the IMU or the center of gravity to the camera's origin point
body_offset_enabled = 0
body_offset_x = 0 # In meters (m)
body_offset_y = 0 # In meters (m)
body_offset_z = 0 # In meters (m)
# Global scale factor, position x y z will be scaled up/down by this factor
scale_factor = 1.0
# Enable using yaw from compass to align north (zero degree is facing north)
compass_enabled = 0
# pose data confidence: 0x0 - Failed / 0x1 - Low / 0x2 - Medium / 0x3 - High
pose_data_confidence_level = ('FAILED', 'Low', 'Medium', 'High')
# lock for thread synchronization
lock = threading.Lock()
mavlink_thread_should_exit = False
# default exit code is failure - a graceful termination with a
# terminate signal is possible.
exit_code = 1
#######################################
# Global variables
#######################################
# FCU connection variables
# Camera-related variables
pipe = None
pose_sensor = None
linear_accel_cov = 0.01
angular_vel_cov = 0.01
# Data variables
data = None
prev_data = None
H_aeroRef_aeroBody = None
V_aeroRef_aeroBody = None
heading_north_yaw = None
current_confidence_level = None
current_time_us = 0
# Incremented every time pose jumping or relocalization happens
# See here: https://github.com/IntelRealSense/librealsense/blob/master/doc/t265.md#are-there-any-t265-specific-options
# For ArduPilot, a non-zero "reset_counter" also confirms that the user's setup is using MAVLink 2
reset_counter = 1
#######################################
# Parsing user inputs
#######################################
parser = argparse.ArgumentParser(description='Relay RealSense T265 pose data to a MAVLink flight controller')
parser.add_argument('--connect',
help="Vehicle connection target string. If not specified, a default string will be used.")
parser.add_argument('--baudrate', type=float,
help="Vehicle connection baudrate. If not specified, a default value will be used.")
parser.add_argument('--vision_position_estimate_msg_hz', type=float,
help="Update frequency for VISION_POSITION_ESTIMATE message. If not specified, a default value will be used.")
parser.add_argument('--vision_position_delta_msg_hz', type=float,
help="Update frequency for VISION_POSITION_DELTA message. If not specified, a default value will be used.")
parser.add_argument('--vision_speed_estimate_msg_hz', type=float,
help="Update frequency for VISION_SPEED_DELTA message. If not specified, a default value will be used.")
parser.add_argument('--scale_calib_enable', default=False, action='store_true',
help="Scale calibration. Only run while NOT in flight")
parser.add_argument('--camera_orientation', type=int,
help="Configuration for camera orientation. Currently supported: forward, usb port to the right - 0; downward, usb port to the right - 1, 2: forward tilted down 45deg")
parser.add_argument('--debug_enable', type=int,
help="Enable debug messages on terminal")
args = parser.parse_args()
connection_string = args.connect
connection_baudrate = args.baudrate
vision_position_estimate_msg_hz = args.vision_position_estimate_msg_hz
vision_position_delta_msg_hz = args.vision_position_delta_msg_hz
vision_speed_estimate_msg_hz = args.vision_speed_estimate_msg_hz
scale_calib_enable = args.scale_calib_enable
camera_orientation = args.camera_orientation
debug_enable = args.debug_enable
# Use default values if no inputs are specified
if not connection_string:
connection_string = connection_string_default
progress("INFO: Using default connection_string %s" % connection_string)
else:
progress("INFO: Using connection_string %s" % connection_string)
if not connection_baudrate:
connection_baudrate = connection_baudrate_default
progress("INFO: Using default connection_baudrate %s" % connection_baudrate)
else:
progress("INFO: Using connection_baudrate %s" % connection_baudrate)
if not vision_position_estimate_msg_hz:
vision_position_estimate_msg_hz = vision_position_estimate_msg_hz_default
progress("INFO: Using default vision_position_estimate_msg_hz %s" % vision_position_estimate_msg_hz)
else:
progress("INFO: Using vision_position_estimate_msg_hz %s" % vision_position_estimate_msg_hz)
if not vision_position_delta_msg_hz:
vision_position_delta_msg_hz = vision_position_delta_msg_hz_default
progress("INFO: Using default vision_position_delta_msg_hz %s" % vision_position_delta_msg_hz)
else:
progress("INFO: Using vision_position_delta_msg_hz %s" % vision_position_delta_msg_hz)
if not vision_speed_estimate_msg_hz:
vision_speed_estimate_msg_hz = vision_speed_estimate_msg_hz_default
progress("INFO: Using default vision_speed_estimate_msg_hz %s" % vision_speed_estimate_msg_hz)
else:
progress("INFO: Using vision_speed_estimate_msg_hz %s" % vision_speed_estimate_msg_hz)
if body_offset_enabled == 1:
progress("INFO: Using camera position offset: Enabled, x y z is %s %s %s" % (body_offset_x, body_offset_y, body_offset_z))
else:
progress("INFO: Using camera position offset: Disabled")
if compass_enabled == 1:
progress("INFO: Using compass: Enabled. Heading will be aligned to north.")
else:
progress("INFO: Using compass: Disabled")
if scale_calib_enable:
progress("\nINFO: SCALE CALIBRATION PROCESS. DO NOT RUN DURING FLIGHT.\nINFO: TYPE IN NEW SCALE IN FLOATING POINT FORMAT\n")
else:
if scale_factor == 1.0:
progress("INFO: Using default scale factor %s" % scale_factor)
else:
progress("INFO: Using scale factor %s" % scale_factor)
if camera_orientation is None:
camera_orientation = camera_orientation_default
progress("INFO: Using default camera orientation %s" % camera_orientation)
else:
progress("INFO: Using camera orientation %s" % camera_orientation)
if camera_orientation == 0: # Forward, USB port to the right
H_aeroRef_T265Ref = np.array([[0,0,-1,0],[1,0,0,0],[0,-1,0,0],[0,0,0,1]])
H_T265body_aeroBody = np.linalg.inv(H_aeroRef_T265Ref)
elif camera_orientation == 1: # Downfacing, USB port to the right
H_aeroRef_T265Ref = np.array([[0,0,-1,0],[1,0,0,0],[0,-1,0,0],[0,0,0,1]])
H_T265body_aeroBody = np.array([[0,1,0,0],[1,0,0,0],[0,0,-1,0],[0,0,0,1]])
elif camera_orientation == 2: # 45degree forward
H_aeroRef_T265Ref = np.array([[0,0,-1,0],[1,0,0,0],[0,-1,0,0],[0,0,0,1]])
H_T265body_aeroBody = (tf.euler_matrix(m.pi/4, 0, 0)).dot(np.linalg.inv(H_aeroRef_T265Ref))
else: # Default is facing forward, USB port to the right
H_aeroRef_T265Ref = np.array([[0,0,-1,0],[1,0,0,0],[0,-1,0,0],[0,0,0,1]])
H_T265body_aeroBody = np.linalg.inv(H_aeroRef_T265Ref)
if not debug_enable:
debug_enable = 0
else:
debug_enable = 1
np.set_printoptions(precision=4, suppress=True) # Format output on terminal
progress("INFO: Debug messages enabled.")
#######################################
# Functions - MAVLink
#######################################
def mavlink_loop(conn, callbacks):
'''a main routine for a thread; reads data from a mavlink connection,
calling callbacks based on message type received.
'''
interesting_messages = list(callbacks.keys())
while not mavlink_thread_should_exit:
# send a heartbeat msg
conn.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_ONBOARD_CONTROLLER,
mavutil.mavlink.MAV_AUTOPILOT_GENERIC,
0,
0,
0)
m = conn.recv_match(type=interesting_messages, timeout=1, blocking=True)
if m is None:
continue
callbacks[m.get_type()](m)
# https://mavlink.io/en/messages/common.html#VISION_POSITION_ESTIMATE
def send_vision_position_estimate_message():
global current_time_us, H_aeroRef_aeroBody, reset_counter
with lock:
if H_aeroRef_aeroBody is not None:
# Setup angle data
rpy_rad = np.array( tf.euler_from_matrix(H_aeroRef_aeroBody, 'sxyz'))
# Setup covariance data, which is the upper right triangle of the covariance matrix, see here: https://files.gitter.im/ArduPilot/VisionProjects/1DpU/image.png
# Attempt #1: following this formula https://github.com/IntelRealSense/realsense-ros/blob/development/realsense2_camera/src/base_realsense_node.cpp#L1406-L1411
cov_pose = linear_accel_cov * pow(10, 3 - int(data.tracker_confidence))
cov_twist = angular_vel_cov * pow(10, 1 - int(data.tracker_confidence))
covariance = np.array([cov_pose, 0, 0, 0, 0, 0,
cov_pose, 0, 0, 0, 0,
cov_pose, 0, 0, 0,
cov_twist, 0, 0,
cov_twist, 0,
cov_twist])
# Send the message
conn.mav.vision_position_estimate_send(
current_time_us, # us Timestamp (UNIX time or time since system boot)
H_aeroRef_aeroBody[0][3], # Global X position
H_aeroRef_aeroBody[1][3], # Global Y position
H_aeroRef_aeroBody[2][3], # Global Z position
rpy_rad[0], # Roll angle
rpy_rad[1], # Pitch angle
rpy_rad[2], # Yaw angle
covariance, # Row-major representation of pose 6x6 cross-covariance matrix
reset_counter # Estimate reset counter. Increment every time pose estimate jumps.
)
# https://mavlink.io/en/messages/ardupilotmega.html#VISION_POSITION_DELTA
def send_vision_position_delta_message():
global current_time_us, current_confidence_level, H_aeroRef_aeroBody
with lock:
if H_aeroRef_aeroBody is not None:
# Calculate the deltas in position, attitude and time from the previous to current orientation
H_aeroRef_PrevAeroBody = send_vision_position_delta_message.H_aeroRef_PrevAeroBody
H_PrevAeroBody_CurrAeroBody = (np.linalg.inv(H_aeroRef_PrevAeroBody)).dot(H_aeroRef_aeroBody)
delta_time_us = current_time_us - send_vision_position_delta_message.prev_time_us
delta_position_m = [H_PrevAeroBody_CurrAeroBody[0][3], H_PrevAeroBody_CurrAeroBody[1][3], H_PrevAeroBody_CurrAeroBody[2][3]]
delta_angle_rad = np.array( tf.euler_from_matrix(H_PrevAeroBody_CurrAeroBody, 'sxyz'))
# Send the message
conn.mav.vision_position_delta_send(
current_time_us, # us: Timestamp (UNIX time or time since system boot)
delta_time_us, # us: Time since last reported camera frame
delta_angle_rad, # float[3] in radian: Defines a rotation vector in body frame that rotates the vehicle from the previous to the current orientation
delta_position_m, # float[3] in m: Change in position from previous to current frame rotated into body frame (0=forward, 1=right, 2=down)
current_confidence_level # Normalized confidence value from 0 to 100.
)
# Save static variables
send_vision_position_delta_message.H_aeroRef_PrevAeroBody = H_aeroRef_aeroBody
send_vision_position_delta_message.prev_time_us = current_time_us
# https://mavlink.io/en/messages/common.html#VISION_SPEED_ESTIMATE
def send_vision_speed_estimate_message():
global current_time_us, V_aeroRef_aeroBody, reset_counter
with lock:
if V_aeroRef_aeroBody is not None:
# Attempt #1: following this formula https://github.com/IntelRealSense/realsense-ros/blob/development/realsense2_camera/src/base_realsense_node.cpp#L1406-L1411
cov_pose = linear_accel_cov * pow(10, 3 - int(data.tracker_confidence))
covariance = np.array([cov_pose, 0, 0,
0, cov_pose, 0,
0, 0, cov_pose])
# Send the message
conn.mav.vision_speed_estimate_send(
current_time_us, # us Timestamp (UNIX time or time since system boot)
V_aeroRef_aeroBody[0][3], # Global X speed
V_aeroRef_aeroBody[1][3], # Global Y speed
V_aeroRef_aeroBody[2][3], # Global Z speed
covariance, # covariance
reset_counter # Estimate reset counter. Increment every time pose estimate jumps.
)
# Report tracking confidence changes to the GCS and terminal
def update_tracking_confidence_to_gcs():
if data is not None and update_tracking_confidence_to_gcs.prev_confidence_level != data.tracker_confidence:
confidence_status_string = 'Tracking confidence: ' + pose_data_confidence_level[data.tracker_confidence]
send_msg_to_gcs(confidence_status_string)
update_tracking_confidence_to_gcs.prev_confidence_level = data.tracker_confidence
# https://mavlink.io/en/messages/common.html#STATUSTEXT
def send_msg_to_gcs(text_to_be_sent):
# MAV_SEVERITY: 0=EMERGENCY 1=ALERT 2=CRITICAL 3=ERROR, 4=WARNING, 5=NOTICE, 6=INFO, 7=DEBUG, 8=ENUM_END
text_msg = 'T265: ' + text_to_be_sent
conn.mav.statustext_send(mavutil.mavlink.MAV_SEVERITY_INFO, text_msg.encode())
progress("INFO: %s" % text_to_be_sent)
# Send a mavlink SET_GPS_GLOBAL_ORIGIN message (http://mavlink.org/messages/common#SET_GPS_GLOBAL_ORIGIN), which allows us to use local position information without a GPS.
def set_default_global_origin():
conn.mav.set_gps_global_origin_send(
1,
home_lat,
home_lon,
home_alt
)
# Send a mavlink SET_HOME_POSITION message (http://mavlink.org/messages/common#SET_HOME_POSITION), which allows us to use local position information without a GPS.
def set_default_home_position():
x = 0
y = 0
z = 0
q = [1, 0, 0, 0] # w x y z
approach_x = 0
approach_y = 0
approach_z = 1
conn.mav.set_home_position_send(
1,
home_lat,
home_lon,
home_alt,
x,
y,
z,
q,
approach_x,
approach_y,
approach_z
)
# Request a timesync update from the flight controller, for future work.
# TODO: Inspect the usage of timesync_update
def update_timesync(ts=0, tc=0):
if ts == 0:
ts = int(round(time.time() * 1000))
conn.mav.timesync_send(
tc, # tc1
ts # ts1
)
# Listen to attitude data to acquire heading when compass data is enabled
def att_msg_callback(value):
global heading_north_yaw
if heading_north_yaw is None:
heading_north_yaw = value.yaw
progress("INFO: Received first ATTITUDE message with heading yaw %.2f degrees" % m.degrees(heading_north_yaw))
#######################################
# Functions - T265
#######################################
def increment_reset_counter():
global reset_counter
if reset_counter >= 255:
reset_counter = 1
reset_counter += 1
# List of notification events: https://github.com/IntelRealSense/librealsense/blob/development/include/librealsense2/h/rs_types.h
# List of notification API: https://github.com/IntelRealSense/librealsense/blob/development/common/notifications.cpp
def realsense_notification_callback(notif):
progress("INFO: T265 event: " + notif)
if notif.get_category() is rs.notification_category.pose_relocalization:
increment_reset_counter()
send_msg_to_gcs('Relocalization detected')
def realsense_connect():
global pipe, pose_sensor
# Declare RealSense pipeline, encapsulating the actual device and sensors
pipe = rs.pipeline()
# Build config object before requesting data
cfg = rs.config()
# Enable the stream we are interested in
cfg.enable_stream(rs.stream.pose) # Positional data
# Configure callback for relocalization event
device = cfg.resolve(pipe).get_device()
pose_sensor = device.first_pose_sensor()
pose_sensor.set_notifications_callback(realsense_notification_callback)
# Start streaming with requested config
pipe.start(cfg)
#######################################
# Functions - Miscellaneous
#######################################
# Monitor user input from the terminal and perform action accordingly
def user_input_monitor():
global scale_factor
while True:
# Special case: updating scale
if scale_calib_enable:
scale_factor = float(input("INFO: Type in new scale as float number\n"))
progress("INFO: New scale is %s" % scale_factor)
if enable_auto_set_ekf_home:
send_msg_to_gcs('Set EKF home with default GPS location')
set_default_global_origin()
set_default_home_position()
time.sleep(1) # Wait a short while for FCU to start working
# Add new action here according to the key pressed.
# Enter: set EKF home when the user presses Enter
try:
c = input()
if c == "":
send_msg_to_gcs('Set EKF home with default GPS location')
set_default_global_origin()
set_default_home_position()
else:
progress("Got keyboard input %s" % c)
except IOError: pass
#######################################
# Main code starts here
#######################################
try:
progress("INFO: pyrealsense2 version: %s" % str(rs.__version__))
except Exception:
# fail silently
pass
progress("INFO: Starting Vehicle communications")
conn = mavutil.mavlink_connection(
connection_string,
autoreconnect = True,
source_system = 1,
source_component = 93,
baud=connection_baudrate,
force_connected=True,
)
mavlink_callbacks = {
'ATTITUDE': att_msg_callback,
}
mavlink_thread = threading.Thread(target=mavlink_loop, args=(conn, mavlink_callbacks))
mavlink_thread.start()
# connecting and configuring the camera is a little hit-and-miss.
# Start a timer and rely on a restart of the script to get it working.
# Configuring the camera appears to block all threads, so we can't do
# this internally.
# send_msg_to_gcs('Setting timer...')
signal.setitimer(signal.ITIMER_REAL, 5) # seconds...
send_msg_to_gcs('Connecting to camera...')
realsense_connect()
send_msg_to_gcs('Camera connected.')
signal.setitimer(signal.ITIMER_REAL, 0) # cancel alarm
# Send MAVLink messages in the background at pre-determined frequencies
sched = BackgroundScheduler()
if enable_msg_vision_position_estimate:
sched.add_job(send_vision_position_estimate_message, 'interval', seconds = 1/vision_position_estimate_msg_hz)
if enable_msg_vision_position_delta:
sched.add_job(send_vision_position_delta_message, 'interval', seconds = 1/vision_position_delta_msg_hz)
send_vision_position_delta_message.H_aeroRef_PrevAeroBody = tf.quaternion_matrix([1,0,0,0])
send_vision_position_delta_message.prev_time_us = int(round(time.time() * 1000000))
if enable_msg_vision_speed_estimate:
sched.add_job(send_vision_speed_estimate_message, 'interval', seconds = 1/vision_speed_estimate_msg_hz)
if enable_update_tracking_confidence_to_gcs:
sched.add_job(update_tracking_confidence_to_gcs, 'interval', seconds = 1/update_tracking_confidence_to_gcs_hz_default)
update_tracking_confidence_to_gcs.prev_confidence_level = -1
# A separate thread to monitor user input
if enable_user_keyboard_input:
user_keyboard_input_thread = threading.Thread(target=user_input_monitor)
user_keyboard_input_thread.daemon = True
user_keyboard_input_thread.start()
progress("INFO: Press Enter to set EKF home at default location")
sched.start()
# gracefully terminate the script if an interrupt signal (e.g. ctrl-c)
# is received. This is considered to be abnormal termination.
main_loop_should_quit = False
def sigint_handler(sig, frame):
global main_loop_should_quit
main_loop_should_quit = True
signal.signal(signal.SIGINT, sigint_handler)
# gracefully terminate the script if a terminate signal is received
# (e.g. kill -TERM).
def sigterm_handler(sig, frame):
global main_loop_should_quit
main_loop_should_quit = True
global exit_code
exit_code = 0
signal.signal(signal.SIGTERM, sigterm_handler)
if compass_enabled == 1:
time.sleep(1) # Wait a short while for yaw to be correctly initiated
send_msg_to_gcs('Sending vision messages to FCU')
try:
while not main_loop_should_quit:
# Wait for the next set of frames from the camera
frames = pipe.wait_for_frames()
# Fetch pose frame
pose = frames.get_pose_frame()
# Process data
if pose:
with lock:
# Store the timestamp for MAVLink messages
current_time_us = int(round(time.time() * 1000000))
# Pose data consists of translation and rotation
data = pose.get_pose_data()
# Confidence level value from T265: 0-3, remapped to 0 - 100: 0% - Failed / 33.3% - Low / 66.6% - Medium / 100% - High
current_confidence_level = float(data.tracker_confidence * 100 / 3)
# In transformations, Quaternions w+ix+jy+kz are represented as [w, x, y, z]!
H_T265Ref_T265body = tf.quaternion_matrix([data.rotation.w, data.rotation.x, data.rotation.y, data.rotation.z])
H_T265Ref_T265body[0][3] = data.translation.x * scale_factor
H_T265Ref_T265body[1][3] = data.translation.y * scale_factor
H_T265Ref_T265body[2][3] = data.translation.z * scale_factor
# Transform to aeronautic coordinates (body AND reference frame!)
H_aeroRef_aeroBody = H_aeroRef_T265Ref.dot( H_T265Ref_T265body.dot( H_T265body_aeroBody))
# Calculate GLOBAL XYZ speed (speed from T265 is already GLOBAL)
V_aeroRef_aeroBody = tf.quaternion_matrix([1,0,0,0])
V_aeroRef_aeroBody[0][3] = data.velocity.x
V_aeroRef_aeroBody[1][3] = data.velocity.y
V_aeroRef_aeroBody[2][3] = data.velocity.z
V_aeroRef_aeroBody = H_aeroRef_T265Ref.dot(V_aeroRef_aeroBody)
# Check for pose jump and increment reset_counter
if prev_data is not None:
delta_translation = [data.translation.x - prev_data.translation.x, data.translation.y - prev_data.translation.y, data.translation.z - prev_data.translation.z]
delta_velocity = [data.velocity.x - prev_data.velocity.x, data.velocity.y - prev_data.velocity.y, data.velocity.z - prev_data.velocity.z]
position_displacement = np.linalg.norm(delta_translation)
speed_delta = np.linalg.norm(delta_velocity)
# Pose jump is indicated when position changes abruptly. The behavior is not well documented yet (as of librealsense 2.34.0)
jump_threshold = 0.1 # in meters, from trials and errors, should be relative to how frequent is the position data obtained (200Hz for the T265)
jump_speed_threshold = 20.0 # in m/s from trials and errors, should be relative to how frequent is the velocity data obtained (200Hz for the T265)
if (position_displacement > jump_threshold) or (speed_delta > jump_speed_threshold):
send_msg_to_gcs('VISO jump detected')
if position_displacement > jump_threshold:
progress("Position jumped by: %s" % position_displacement)
elif speed_delta > jump_speed_threshold:
progress("Speed jumped by: %s" % speed_delta)
increment_reset_counter()
prev_data = data
# Take offsets from body's center of gravity (or IMU) to camera's origin into account
if body_offset_enabled == 1:
H_body_camera = tf.euler_matrix(0, 0, 0, 'sxyz')
H_body_camera[0][3] = body_offset_x
H_body_camera[1][3] = body_offset_y
H_body_camera[2][3] = body_offset_z
H_camera_body = np.linalg.inv(H_body_camera)
H_aeroRef_aeroBody = H_body_camera.dot(H_aeroRef_aeroBody.dot(H_camera_body))
# Realign heading to face north using initial compass data
if compass_enabled == 1:
H_aeroRef_aeroBody = H_aeroRef_aeroBody.dot( tf.euler_matrix(0, 0, heading_north_yaw, 'sxyz'))
# Show debug messages here
if debug_enable == 1:
os.system('clear') # This helps in displaying the messages to be more readable
progress("DEBUG: Raw RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_T265Ref_T265body, 'sxyz')) * 180 / m.pi))
progress("DEBUG: NED RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_aeroRef_aeroBody, 'sxyz')) * 180 / m.pi))
progress("DEBUG: Raw pos xyz : {}".format( np.array( [data.translation.x, data.translation.y, data.translation.z])))
progress("DEBUG: NED pos xyz : {}".format( np.array( tf.translation_from_matrix( H_aeroRef_aeroBody))))
except Exception as e:
progress(e)
except:
send_msg_to_gcs('ERROR IN SCRIPT')
progress("Unexpected error: %s" % sys.exc_info()[0])
finally:
progress('Closing the script...')
# start a timer in case stopping everything nicely doesn't work.
signal.setitimer(signal.ITIMER_REAL, 5) # seconds...
pipe.stop()
mavlink_thread_should_exit = True
mavlink_thread.join()
conn.close()
progress("INFO: Realsense pipeline and vehicle object closed.")
sys.exit(exit_code) |
wsdump.py | #!/Users/erres/Desktop/venv/bin/python3
import argparse
import code
import six
import sys
import threading
import time
import websocket
from six.moves.urllib.parse import urlparse
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
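"""argparse action for -v: accepts an explicit integer level or counts repeated letters (e.g. -vv gives 2)."""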
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v")+1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
return parser.parse_args()
class RawInput():
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if (args.proxy):
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if (args.origin):
options["origin"] = args.origin
if (args.subprotocols):
options["subprotocols"] = args.subprotocols
opts = {}
if (args.nocert):
opts = { "cert_reqs": websocket.ssl.CERT_NONE, "check_hostname": False }
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
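"""Receive one frame and return (opcode, data); answers PING with PONG and acknowledges CLOSE."""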
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return (websocket.ABNF.OPCODE_CLOSE, None)
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return (frame.opcode, frame.data)
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return (frame.opcode, None)
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if not args.verbose and opcode in OPCODE_DATA:
msg = data
elif args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
if msg is not None:
if (args.timings):
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
main.py | #!/usr/bin/env python
# encoding: utf8
#
# Copyright © Burak Arslan <burak at arskom dot com dot tr>,
# Arskom Ltd. http://www.arskom.com.tr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the owner nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import multiprocessing
import logging
logger = logging.getLogger('spyne.wsgi')
logger.setLevel(logging.DEBUG)
import gobject
gobject.threads_init()
import gst
import zmq
context = zmq.Context()
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.exceptions import NotFound
from werkzeug.serving import run_simple
from spyne.application import Application
from spyne.decorator import rpc
from spyne.service import ServiceBase
from spyne.model.binary import ByteArray
from spyne.server.wsgi import WsgiApplication
from spyne.protocol.http import HttpRpc
port = 9000
url = 'stream'
video_device = "/dev/video0"
stream_socket = "tcp://127.0.0.1:5678"
header_socket = "tcp://127.0.0.1:5679"
# Use mplayer tv:// -tv device=/dev/video0 to get the default resolution of your
# webcam and adjust the width and height settings below.
# You need gstreamer, its python bindings and relevant plugins installed as well
# as werkzeug and pyzmq to run this example. easy_install werkzeug and
# easy_install pyzmq or pyzmq-static will install the python packages. refer to
# your distro's documentation on installing gstreamer and friends.
# FIXME: This is most probably a linux-specific example. Users of other
# operating systems: patches are welcome!
# FIXME: Does not flush non-keyframe data after sending stream headers like
# multifdsink does. Patches are welcome!
# FIXME: Does not support audio. I imagine some small tweak to the below gst
# pipeline would "Just Work". Patches are welcome!
v4l2_pipeline = (
'v4l2src device=%s '
'! video/x-raw-yuv '
'! videoscale ! video/x-raw-yuv, width=400, height=300 '
'! videorate ! video/x-raw-yuv,framerate=25/2 '
'! ffmpegcolorspace '
'! theoraenc quality=32 ! oggmux ! appsink name=sink sync=False' % video_device)
# use this if you want to publish your screen.
xsrc_pipeline = (
'ximagesrc '
'! video/x-raw-rgb,framerate=2/1 '
'! ffmpegcolorspace '
'! theoraenc quality=32 ! oggmux ! appsink name=sink sync=False' )
def camera_publisher():
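"""Capture video via GStreamer and publish Ogg/Theora buffers over ZeroMQ."""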
# init gst
pipeline = gst.parse_launch(v4l2_pipeline)
pipeline.set_state(gst.STATE_PLAYING)
appsink = pipeline.get_by_name('sink')
buffer = appsink.emit('pull-preroll')
caps = buffer.get_caps()[0]
stream_header = ""
if "streamheader" in caps:
stream_header = ''.join([str(h) for h in caps["streamheader"]])
# init zeromq
inner_context = zmq.Context()
# send stream header to the http daemon
socket = inner_context.socket(zmq.REP)
socket.bind(header_socket)
socket.recv()
socket.send(str(stream_header))
socket.close()
# publish stream
publisher = inner_context.socket(zmq.PUB)
publisher.bind(stream_socket)
while True:
buf = appsink.emit('pull-buffer')
publisher.send(str(buf))
class StreamingService(ServiceBase):
stream_header = ""
@rpc(_returns=ByteArray)
def webcam(ctx):
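"""Yield the cached Ogg stream header first, then relay frames from the ZeroMQ publisher."""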
yield StreamingService.stream_header
socket = context.socket(zmq.SUB)
socket.connect(stream_socket)
socket.setsockopt(zmq.SUBSCRIBE, "")
while True:
yield socket.recv()
def main():
# start publisher process
p = multiprocessing.Process(target=camera_publisher)
p.start()
stream_app = WsgiApplication(Application([StreamingService],
tns='spyne.examples.stream',
in_protocol=HttpRpc(),
out_protocol=HttpRpc(mime_type='video/ogg'),
))
root_app = DispatcherMiddleware(NotFound(), {'/stream': stream_app})
# get stream header from the publisher process
socket = context.socket(zmq.REQ)
socket.connect(header_socket)
socket.send("hey")
StreamingService.stream_header = socket.recv()
socket.close()
# have fun!
run_simple('0.0.0.0', port, root_app, static_files={'/':"."}, threaded=True)
if __name__ == '__main__':
import sys
sys.exit(main())
|
example_03_threading.py | #!/usr/bin/env python
# Tai Sakuma <tai.sakuma@gmail.com>
import time, random
import threading
from atpbar import atpbar, flush
##__________________________________________________________________||
def task(n, name):
for i in atpbar(range(n), name=name):
time.sleep(0.0001)
##__________________________________________________________________||
nthreads = 5
threads = [ ]
for i in range(nthreads):
name = 'thread {}'.format(i)
n = random.randint(5, 10000)
t = threading.Thread(target=task, args=(n, name))
t.start()
threads.append(t)
for t in threads:
t.join()
flush()
##__________________________________________________________________||
|
slurm-sidecar.py | #!/usr/bin/env python3
"""Run a Snakemake v7+ sidecar process for Slurm
This sidecar process will poll ``squeue --me --format='%i,%T'`` every 60
seconds by default (use environment variable ``SNAKEMAKE_SLURM_SQUEUE_WAIT``
for adjusting this).
Note that you have to adjust the value to fit to your ``MinJobAge`` Slurm
configuration. Jobs remain at least ``MinJobAge`` seconds known to the
Slurm controller (default of 300 seconds). If you query ``squeue`` every
60 seconds then this is plenty and you will observe all relevant job status
states as they are relevant for Snakemake.
If the environment variable ``SNAKEMAKE_CLUSTER_SIDECAR_VARS`` is set then
the ``slurm-status.py`` of the slurm profile will attempt to query this
sidecar process via HTTP. As the sidecar process does not update its
cache in real-time, setting ``SNAKEMAKE_SLURM_SQUEUE_WAIT`` too large might
lead to Snakemake missing the "done" job state. The defaults of
``SNAKEMAKE_SLURM_SQUEUE_WAIT=60`` and Slurm's ``MinJobAge=600`` work well
together and you will see all relevant job statuses.
If the sidecar is queried for a job ID that it has not seen yet then it will
perform a query to ``sacct`` so that it works well with Snakemake's "resume
external job" feature. The ``slurm-submit.py`` script of the Snakemake profile
will register all jobs via POST with this sidecar.
"""
import http.server
import json
import logging
import os
import subprocess
import sys
import signal
import time
import threading
import uuid
from CookieCutter import CookieCutter
#: Enables debug messages for the slurm sidecar.
DEBUG = bool(int(os.environ.get("SNAKEMAKE_SLURM_DEBUG", "0")))
#: Command to call when calling squeue
SQUEUE_CMD = os.environ.get("SNAKEMAKE_SLURM_SQUEUE_CMD", "squeue")
#: Number of seconds to wait between ``squeue`` calls.
SQUEUE_WAIT = int(os.environ.get("SNAKEMAKE_SLURM_SQUEUE_WAIT", "60"))
logger = logging.getLogger(__name__)
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
logger.setLevel(logging.DEBUG)
class PollSqueueThread(threading.Thread):
"""Thread that polls ``squeue`` until stopped by ``stop()``"""
def __init__(
self,
squeue_wait,
squeue_cmd,
squeue_timeout=2,
sleep_time=0.01,
max_tries=3,
*args,
**kwargs
):
super().__init__(target=self._work, *args, **kwargs)
#: Time to wait between squeue calls.
self.squeue_wait = squeue_wait
#: Command to call squeue with.
self.squeue_cmd = squeue_cmd
#: Whether or not the thread should stop.
self.stopped = threading.Event()
#: Previous call to ``squeue``
self.prev_call = 0.0
#: Time to sleep between iterations in seconds. Thread can only be
#: terminated after this interval when waiting.
self.sleep_time = sleep_time
#: Maximal running time to accept for call to ``squeue``.
self.squeue_timeout = squeue_timeout
#: Maximal number of tries if call to ``squeue`` fails.
self.max_tries = max_tries
#: Dict mapping the job id to the job state string.
self.states = {}
#: Make at least one call to squeue, must not fail.
logger.debug("initializing trhead")
self._call_squeue(allow_failure=False)
self.prev_call = time.time()
def _work(self):
"""Execute the thread's action"""
while not self.stopped.is_set():
now = time.time()
if now - self.prev_call > self.squeue_wait:
self._call_squeue()
self.prev_call = now
time.sleep(self.sleep_time)
def get_state(self, jobid):
"""Return the job state for the given jobid."""
jobid = str(jobid)
if jobid not in self.states:
self.states[jobid] = self._get_state_sacct(jobid)
return self.states.get(jobid, "__not_seen_yet__")
def register_job(self, jobid):
"""Register job with the given ID."""
self.states.setdefault(jobid, None)
def _get_state_sacct(self, jobid):
"""Implement retrieving state via sacct for resuming jobs."""
cluster = CookieCutter.get_cluster_option()
cmd = ["sacct", "-P", "-b", "-j", jobid, "-n"]
if cluster:
cmd.append(cluster)
try_num = 0
while try_num < self.max_tries:
try_num += 1
try:
logger.debug("Calling %s (try %d)", cmd, try_num)
output = subprocess.check_output(cmd, timeout=self.squeue_timeout, text=True)
break
except subprocess.TimeoutExpired as e:
logger.debug("Call to %s timed out (try %d of %d)", cmd, try_num, self.max_tries)
except subprocess.CalledProcessError as e:
logger.debug("Call to %s failed (try %d of %d)", cmd, try_num, self.max_tries)
if try_num >= self.max_tries:
raise Exception("Problem with call to %s" % cmd)
else:
parsed = {x.split("|")[0]: x.split("|")[1] for x in output.strip().split("\n")}
logger.debug("Returning state of %s as %s", jobid, parsed[jobid])
return parsed[jobid]
def stop(self):
"""Flag thread to stop execution"""
logger.debug("stopping thread")
self.stopped.set()
def _call_squeue(self, allow_failure=True):
"""Run the call to ``squeue``"""
cluster = CookieCutter.get_cluster_option()
try_num = 0
cmd = [SQUEUE_CMD, "--me", "--format=%i,%T", "--state=all"]
if cluster:
cmd.append(cluster)
while try_num < self.max_tries:
try_num += 1
try:
logger.debug("Calling %s (try %d)", cmd, try_num)
output = subprocess.check_output(cmd, timeout=self.squeue_timeout, text=True)
logger.debug("Output is:\n---\n%s\n---", output)
break
except subprocess.TimeoutExpired as e:
if not allow_failure:
raise
logger.debug("Call to %s timed out (try %d of %d)", cmd, try_num, self.max_tries)
except subprocess.CalledProcessError as e:
if not allow_failure:
raise
logger.debug("Call to %s failed (try %d of %d)", cmd, try_num, self.max_tries)
if try_num >= self.max_tries:
logger.debug("Giving up for this round")
else:
logger.debug("parsing output")
self._parse_output(output)
def _parse_output(self, output):
"""Parse output of ``squeue`` call."""
header = None
for line in output.splitlines():
line = line.strip()
arr = line.split(",")
if not header:
if not line.startswith("JOBID"):
continue # skip leader
header = arr
else:
logger.debug("Updating state of %s to %s", arr[0], arr[1])
self.states[arr[0]] = arr[1]
class JobStateHttpHandler(http.server.BaseHTTPRequestHandler):
"""HTTP handler class that responds to ```/job/status/${jobid}/`` GET requests"""
def do_GET(self):
"""Only to ``/job/status/${job_id}/?``"""
logger.debug("--- BEGIN GET")
# Remove trailing slashes from path.
path = self.path
while path.endswith("/"):
path = path[:-1]
# Ensure that /job/status was requested
if not self.path.startswith("/job/status/"):
self.send_response(400)
self.end_headers()
return
# Ensure authentication bearer is correct
auth_required = "Bearer %s" % self.server.http_secret
auth_header = self.headers.get("Authorization")
logger.debug(
"Authorization header is %s, required: %s" % (repr(auth_header), repr(auth_required))
)
if auth_header != auth_required:
self.send_response(403)
self.end_headers()
return
# Otherwise, query job ID status
job_id = self.path[len("/job/status/") :]
logger.debug("Querying for job ID %s" % repr(job_id))
status = self.server.poll_thread.get_state(job_id)
logger.debug("Status: %s" % status)
if not status:
self.send_response(404)
self.end_headers()
else:
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
output = json.dumps({"status": status})
logger.debug("Sending %s" % repr(output))
self.wfile.write(output.encode("utf-8"))
logger.debug("--- END GET")
def do_POST(self):
"""Handle POSTs (only to ``/job/register/${job_id}/?``)"""
logger.debug("--- BEGIN POST")
# Remove trailing slashes from path.
path = self.path
while path.endswith("/"):
path = path[:-1]
# Ensure that /job/register was requested
if not self.path.startswith("/job/register/"):
self.send_response(400)
self.end_headers()
return
# Ensure authentication bearer is correct
auth_required = "Bearer %s" % self.server.http_secret
auth_header = self.headers.get("Authorization")
logger.debug(
"Authorization header is %s, required: %s", repr(auth_header), repr(auth_required)
)
if auth_header != auth_required:
self.send_response(403)
self.end_headers()
return
# Otherwise, register job ID
job_id = self.path[len("/job/register/") :]
self.server.poll_thread.register_job(job_id)
self.send_response(200)
self.end_headers()
logger.debug("--- END POST")
class JobStateHttpServer(http.server.HTTPServer):
"""The HTTP server class"""
allow_reuse_address = False
def __init__(self, poll_thread):
"""Initialize thread and print the ``SNAKEMAKE_CLUSTER_SIDECAR_VARS`` to stdout, then flush."""
super().__init__(("0.0.0.0", 0), JobStateHttpHandler)
#: The ``PollSqueueThread`` with the state dictionary.
self.poll_thread = poll_thread
#: The secret to use.
self.http_secret = str(uuid.uuid4())
sidecar_vars = {
"server_port": self.server_port,
"server_secret": self.http_secret,
"pid": os.getpid(),
}
logger.debug(json.dumps(sidecar_vars))
sys.stdout.write(json.dumps(sidecar_vars) + "\n")
sys.stdout.flush()
def log_message(self, *args, **kwargs):
"""Log messages are printed if ``DEBUG`` is ``True``."""
if DEBUG:
super().log_message(*args, **kwargs)
def main():
# Start thread to poll ``squeue`` in a controlled fashion.
poll_thread = PollSqueueThread(SQUEUE_WAIT, SQUEUE_CMD, name="poll-squeue")
poll_thread.start()
# Initialize HTTP server that makes available the output of ``squeue --me`` in a
# controlled fashion.
http_server = JobStateHttpServer(poll_thread)
http_thread = threading.Thread(name="http-server", target=http_server.serve_forever)
http_thread.start()
# Allow for graceful shutdown of poll thread and HTTP server.
def signal_handler(signum, frame):
"""Handler for Unix signals. Shuts down http_server and poll_thread."""
logger.info("Shutting down squeue poll thread and HTTP server...")
# from remote_pdb import set_trace
# set_trace()
poll_thread.stop()
http_server.shutdown()
logger.info("... HTTP server and poll thread shutdown complete.")
for thread in threading.enumerate():
logger.info("ACTIVE %s", thread.name)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Actually run the server.
poll_thread.join()
logger.debug("poll_thread done")
http_thread.join()
logger.debug("http_thread done")
if __name__ == "__main__":
sys.exit(int(main() or 0))
|
core.py | import logging
import re
import sys
import os
import time
from threading import Thread, Event
from datetime import datetime, timedelta
from collections import deque
try:
from Queue import Queue
except ImportError:
from queue import Queue
import boto3
from botocore.compat import total_seconds
from termcolor import colored
from dateutil.parser import parse
from . import exceptions
from operator import itemgetter, attrgetter, methodcaller
def milis2iso(milis):
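"""Convert a millisecond epoch timestamp to an ISO-8601 UTC string with a trailing 'Z'."""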
res = datetime.utcfromtimestamp(milis/1000.0).isoformat()
return (res + ".000")[:23] + 'Z'
log = logging.getLogger(__name__)
class AWSLogs(object):
ACTIVE = 1
EXHAUSTED = 2
WATCH_SLEEP = 2
FILTER_LOG_EVENTS_STREAMS_LIMIT = 300
MAX_EVENTS_PER_CALL = 10000
ALL_WILDCARD = 'ALL'
def __init__(self, **kwargs):
self.aws_region = kwargs.get('aws_region')
self.aws_access_key_id = kwargs.get('aws_access_key_id')
self.aws_secret_access_key = kwargs.get('aws_secret_access_key')
self.aws_session_token = kwargs.get('aws_session_token')
self.log_group_name = kwargs.get('log_group_name')
self.api_id = kwargs.get('api_id')
self.stage = kwargs.get('stage')
self.log_stream_name = kwargs.get('log_stream_name')
self.filter_pattern = kwargs.get('filter_pattern')
self.watch = kwargs.get('watch')
self.color_enabled = kwargs.get('color_enabled')
self.output_stream_enabled = kwargs.get('output_stream_enabled')
self.output_group_enabled = kwargs.get('output_group_enabled')
self.output_timestamp_enabled = kwargs.get('output_timestamp_enabled')
self.output_ingestion_time_enabled = kwargs.get(
'output_ingestion_time_enabled')
self.start = self.parse_datetime(kwargs.get('start'))
self.end = self.parse_datetime(kwargs.get('end'))
self.next_tokens = {}
self.client = boto3.client(
'logs',
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
region_name=self.aws_region
)
self.apigClient = boto3.client(
'apigateway',
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
region_name=self.aws_region
)
def _get_streams_from_pattern(self, group, pattern):
"""Returns streams in ``group`` matching ``pattern``."""
pattern = '.*' if pattern == self.ALL_WILDCARD else pattern
reg = re.compile('^{0}'.format(pattern))
# print pattern
for stream in self.get_streams(group):
if re.match(reg, stream):
yield stream
def get_lambda_function_names(self, apiId, stage):
# todo: get functions from actual deployment. SDK needs to support embed=apisummary parameter
# stage = self.apigClient.get_stage(restApiId=apiId, stageName=stage)
# dep_id = stage['deploymentId']
# dep = self.apigClient.get_deployment(restApiId=apiId, deploymentId=dep_id)
# print dep['apiSummary']
names = []
resources = self.apigClient.get_resources(restApiId=apiId)['items']
# note: this currently returns the lambda functions from the head revision, which may be different than the deployed version
for resource in resources:
if 'resourceMethods' in resource:
methods = resource['resourceMethods']
for method in methods:
integ = self.apigClient.get_integration(restApiId=apiId,
resourceId=resource['id'],
httpMethod=method)
if (integ['type'] == "AWS" or integ['type'] == "AWS_PROXY") and "lambda:path/2015-03-31/functions" in integ['uri']:
uri = integ['uri']
start = uri.find(":function:")
end = uri.find("/invocations")
name = uri[start + 10:end]
names.append(name)
return names
def list_logs(self):
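"""Fetch log events for the configured API Gateway stage and its Lambda functions and print them."""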
streams = []
if self.log_stream_name != self.ALL_WILDCARD:
streams = list(self._get_streams_from_pattern(self.log_group_name, self.log_stream_name))
if len(streams) > self.FILTER_LOG_EVENTS_STREAMS_LIMIT:
raise exceptions.TooManyStreamsFilteredError(
self.log_stream_name,
len(streams),
self.FILTER_LOG_EVENTS_STREAMS_LIMIT
)
if len(streams) == 0:
raise exceptions.NoStreamsFilteredError(self.log_stream_name)
max_stream_length = max([len(s) for s in streams]) if streams else 10
group_length = len(self.log_group_name)
queue, exit = Queue(), Event()
def update_next_token(response, kwargs):
group = kwargs['logGroupName']
if 'nextToken' in response:
self.next_tokens[group] = response['nextToken']
else:
if group in self.next_tokens:
del self.next_tokens[group]
if self.watch:
time.sleep(0.2)
## todo: remove shared kwargs
def list_lambda_logs(allevents, kwargs):
# add events from lambda function streams
fxns = self.get_lambda_function_names(self.api_id, self.stage)
for fxn in fxns:
lambda_group = ("/aws/lambda/" + fxn).split(':')[0]
kwargs['logGroupName'] = lambda_group
if lambda_group in self.next_tokens:
kwargs['nextToken'] = self.next_tokens[lambda_group]
else:
if 'nextToken' in kwargs:
del kwargs['nextToken']
try:
lambda_response = filter_log_events(**kwargs)
events = lambda_response.get('events', [])
for event in events:
event['group_name'] = lambda_group
allevents.append(event)
update_next_token(lambda_response, kwargs)
except Exception as e:
log.warning("Error fetching logs for Lambda function {0}"
" with group {1}. This function may need to be"
" invoked: {2}".format(fxn, lambda_group, e))
return allevents
## todo: remove shared kwargs
def list_apigateway_logs(allevents, kwargs):
# add events from API Gateway streams
kwargs['logGroupName'] = self.log_group_name
if self.log_group_name in self.next_tokens:
kwargs['nextToken'] = self.next_tokens[self.log_group_name]
else:
if 'nextToken' in kwargs:
del kwargs['nextToken']
try:
apigresponse = filter_log_events(**kwargs)
except Exception as e:
log.error(
"Error fetching logs for API {0}. Please ensure logging "
"is enabled for this API and the API is deployed. See "
"http://docs.aws.amazon.com/apigateway/latest/"
"developerguide/how-to-stage-settings.html: {1}"
.format(self.api_id, e))
raise
events = apigresponse.get('events', [])
for event in events:
event['group_name'] = self.log_group_name
allevents.append(event)
update_next_token(apigresponse, kwargs)
return allevents
def filter_log_events(**kwargs):
try:
return self.client.filter_log_events(**kwargs)
except Exception as e:
log.error("Caught error from CloudWatch: {0}".format(e))
raise
def consumer():
while not exit.is_set():
event = queue.get()
if event is None:
exit.set()
break
output = []
if self.output_group_enabled:
output.append(
self.color(
event['group_name'].ljust(group_length, ' '),
'green'
)
)
if self.output_stream_enabled:
output.append(
self.color(
event['logStreamName'].ljust(max_stream_length,
' '),
'cyan'
)
)
if self.output_timestamp_enabled:
output.append(
self.color(
milis2iso(event['timestamp']),
'yellow'
)
)
if self.output_ingestion_time_enabled:
output.append(
self.color(
milis2iso(event['ingestionTime']),
'blue'
)
)
output.append(event['message'])
print(' '.join(output))
sys.stdout.flush()
def generator():
"""Push events into queue trying to deduplicate them using a lru queue.
AWS API stands for the interleaved parameter that:
interleaved (boolean) -- If provided, the API will make a best
effort to provide responses that contain events from multiple
log streams within the log group interleaved in a single
response. That makes some responses return some subsequent
response duplicate events. In a similar way when awslogs is
called with --watch option, we need to findout which events we
have alredy put in the queue in order to not do it several
times while waiting for new ones and reusing the same
next_token. The site of this queue is MAX_EVENTS_PER_CALL in
order to not exhaust the memory.
"""
interleaving_sanity = deque(maxlen=self.MAX_EVENTS_PER_CALL)
kwargs = {'logGroupName': self.log_group_name,
'interleaved': True}
if streams:
kwargs['logStreamNames'] = streams
if self.start:
kwargs['startTime'] = self.start
if self.end:
kwargs['endTime'] = self.end
if self.filter_pattern:
kwargs['filterPattern'] = self.filter_pattern
while not exit.is_set():
allevents = []
list_apigateway_logs(allevents, kwargs)
list_lambda_logs(allevents, kwargs)
allevents.sort(key=itemgetter('timestamp'))
for event in allevents:
if event['eventId'] not in interleaving_sanity:
interleaving_sanity.append(event['eventId'])
queue.put(event)
# Send the exit signal if no more pages and not in watch mode
if not self.watch and not self.next_tokens:
queue.put(None)
g = Thread(target=generator)
g.start()
c = Thread(target=consumer)
c.start()
try:
while not exit.is_set():
time.sleep(.1)
except (KeyboardInterrupt, SystemExit):
exit.set()
print('Closing...\n')
os._exit(0)
def list_groups(self):
"""Lists available CloudWatch logs groups"""
for group in self.get_groups():
print(group)
def list_streams(self):
"""Lists available CloudWatch logs streams in ``log_group_name``."""
for stream in self.get_streams():
print(stream)
def get_groups(self):
"""Returns available CloudWatch logs groups"""
paginator = self.client.get_paginator('describe_log_groups')
for page in paginator.paginate():
for group in page.get('logGroups', []):
yield group['logGroupName']
def get_streams(self, log_group_name=None):
"""Returns available CloudWatch logs streams in ``log_group_name``."""
kwargs = {'logGroupName': log_group_name or self.log_group_name}
window_start = self.start or 0
window_end = self.end or sys.float_info.max
paginator = self.client.get_paginator('describe_log_streams')
for page in paginator.paginate(**kwargs):
for stream in page.get('logStreams', []):
if 'firstEventTimestamp' not in stream:
# This is a specified log stream rather than
# a filter on the whole log group, so there's
# no firstEventTimestamp.
yield stream['logStreamName']
elif max(stream['firstEventTimestamp'], window_start) <= \
min(stream['lastEventTimestamp'], window_end):
yield stream['logStreamName']
def color(self, text, color):
"""Returns coloured version of ``text`` if ``color_enabled``."""
if self.color_enabled:
return colored(text, color)
return text
def parse_datetime(self, datetime_text):
"""Parse ``datetime_text`` into a ``datetime``."""
if not datetime_text:
return None
ago_regexp = r'(\d+)\s?(m|minute|minutes|h|hour|hours|d|day|days|w|week|weeks)(?: ago)?'
ago_match = re.match(ago_regexp, datetime_text)
if ago_match:
amount, unit = ago_match.groups()
amount = int(amount)
unit = {'m': 60, 'h': 3600, 'd': 86400, 'w': 604800}[unit[0]]
date = datetime.utcnow() + timedelta(seconds=unit * amount * -1)
else:
try:
date = parse(datetime_text)
except ValueError:
raise exceptions.UnknownDateError(datetime_text)
return int(total_seconds(date - datetime(1970, 1, 1))) * 1000
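# Examples (illustrative inputs, not from the original source); both forms
# return epoch milliseconds:
#   self.parse_datetime('2 hours ago')          # relative offset
#   self.parse_datetime('2014-09-26T12:00:00')  # anything dateutil parses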
|
autocast_variable_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AutoCastVariable."""
import os
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras.mixed_precision import autocast_variable
from tensorflow.python.keras.optimizer_v2 import adadelta
from tensorflow.python.keras.optimizer_v2 import adagrad
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.optimizer_v2 import adamax
from tensorflow.python.keras.optimizer_v2 import ftrl
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent as gradient_descent_v1
from tensorflow.python.training.tracking import util as trackable_utils
maybe_distribute = combinations.combine(distribution=[
strategy_combinations.default_strategy,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
])
def get_var(val, dtype, name=None):
return variables.VariableV1(val, use_resource=True, dtype=dtype, name=name)
def set_cpu_logical_devices_to_at_least(num):
"""Create cpu logical devices of at least a given number."""
physical_devices = config.list_physical_devices('CPU')
if not physical_devices:
raise RuntimeError('No CPU found')
if len(physical_devices) >= num:
return
# By default each physical device corresponds to one logical device. We create
# multiple logical devices for the last physical device so that we have `num`
# logical devices.
num = num - len(physical_devices) + 1
logical_devices = []
for _ in range(num):
logical_devices.append(context.LogicalDeviceConfiguration())
# Create logical devices from the last device since sometimes the first GPU
# is the primary graphic card and may have less memory available.
config.set_logical_device_configuration(physical_devices[-1], logical_devices)
@ds_combinations.generate(combinations.combine(mode=['graph', 'eager']))
class AutoCastVariableTest(test.TestCase, parameterized.TestCase):
def setUp(self):
set_cpu_logical_devices_to_at_least(3)
super(AutoCastVariableTest, self).setUp()
@ds_combinations.generate(maybe_distribute)
def test_read(self, distribution):
with distribution.scope():
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
# outside of auto cast scope.
self.assertEqual(x.dtype, dtypes.float32)
self.assertEqual(x.value().dtype, dtypes.float32)
self.assertEqual(x.read_value().dtype, dtypes.float32)
self.assertEqual(array_ops.identity(x).dtype, dtypes.float32)
# within auto cast scope of different dtype
with autocast_variable.enable_auto_cast_variables(dtypes.float16):
self.assertEqual(x.dtype, dtypes.float32)
self.assertEqual(x.value().dtype, dtypes.float16)
self.assertEqual(x.read_value().dtype, dtypes.float16)
self.assertEqual(array_ops.identity(x).dtype, dtypes.float16)
# within auto cast scope of same dtype
with autocast_variable.enable_auto_cast_variables(dtypes.float32):
self.assertEqual(x.dtype, dtypes.float32)
self.assertEqual(x.value().dtype, dtypes.float32)
self.assertEqual(x.read_value().dtype, dtypes.float32)
self.assertEqual(array_ops.identity(x).dtype, dtypes.float32)
def test_sparse_reads(self):
x = get_var([1., 2], dtypes.float32)
# DistributedVariables do not support sparse_read or gather_nd, so this
# test does not run under a distribution strategy
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
self.assertEqual(x.sparse_read([0]).dtype, dtypes.float32)
self.assertEqual(x.gather_nd([0]).dtype, dtypes.float32)
with autocast_variable.enable_auto_cast_variables(dtypes.float16):
self.assertEqual(x.sparse_read([0]).dtype, dtypes.float16)
self.assertEqual(x.gather_nd([0]).dtype, dtypes.float16)
@ds_combinations.generate(maybe_distribute)
def test_read_nested_scopes(self, distribution):
with distribution.scope():
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
with autocast_variable.enable_auto_cast_variables(dtypes.float16):
self.assertEqual(x.read_value().dtype, dtypes.float16)
with autocast_variable.enable_auto_cast_variables(dtypes.float32):
self.assertEqual(x.read_value().dtype, dtypes.float32)
self.assertEqual(x.read_value().dtype, dtypes.float16)
@ds_combinations.generate(maybe_distribute)
def test_dtype_is_not_string(self, distribution):
with distribution.scope():
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.assertEqual(x.dtype, dtypes.float32)
self.assertIsInstance(x.dtype, dtypes.DType)
self.assertEqual(x.true_dtype, dtypes.float32)
self.assertIsInstance(x.true_dtype, dtypes.DType)
dtype = dtypes.float16
with autocast_variable.enable_auto_cast_variables(dtype):
self.assertEqual(x.dtype, dtypes.float32)
self.assertIsInstance(x.dtype, dtypes.DType)
self.assertEqual(x.true_dtype, dtypes.float32)
self.assertIsInstance(x.true_dtype, dtypes.DType)
@ds_combinations.generate(maybe_distribute)
def test_method_delegations(self, distribution):
# Test AutoCastVariable correctly delegates Variable methods to the
# underlying variable.
with self.test_session(), distribution.scope():
for read_dtype in (dtypes.float32, dtypes.float16):
if ds_context.has_strategy():
# MirroredVariable.assign will (incorrectly) return a Mirrored value
# instead of a MirroredVariable. So we cannot properly wrap it in an
# AutoCastVariable.
evaluate = self.evaluate
else:
def evaluate(var):
self.assertIsInstance(var, autocast_variable.AutoCastVariable)
self.assertEqual(array_ops.identity(var).dtype, read_dtype) # pylint: disable=cell-var-from-loop
return self.evaluate(var)
x = get_var(7., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
with autocast_variable.enable_auto_cast_variables(read_dtype):
self.evaluate(x.initializer)
self.assertEqual(self.evaluate(x.value()), 7)
self.assertEqual(self.evaluate(x.read_value()), 7)
self.assertTrue(x.trainable)
self.assertEqual(x.synchronization, x._variable.synchronization)
self.assertEqual(x.aggregation, x._variable.aggregation)
self.assertEqual(self.evaluate(x.initialized_value()), 7)
if not context.executing_eagerly():
if not ds_context.has_strategy():
# These functions are not supported for DistributedVariables
x.load(9)
self.assertEqual(x.eval(), 9)
self.assertEqual(self.evaluate(x.initial_value), 7)
self.assertEqual(x.op, x._variable.op)
self.assertEqual(x.graph, x._variable.graph)
if not ds_context.has_strategy():
# These attributes are not supported for DistributedVariables
self.assertIsNone(x.constraint)
self.assertEqual(x.initializer, x._variable.initializer)
self.assertEqual(evaluate(x.assign(8)), 8)
self.assertEqual(evaluate(x.assign_add(2)), 10)
self.assertEqual(evaluate(x.assign_sub(3)), 7)
self.assertEqual(x.name, x._variable.name)
self.assertEqual(x.device, x._variable.device)
self.assertEqual(x.shape, ())
self.assertEqual(x.get_shape(), ())
if not ds_context.has_strategy():
# Test scatter_* methods. These are not supported for
# DistributedVariables
x = get_var([7, 8], dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
with autocast_variable.enable_auto_cast_variables(read_dtype):
self.evaluate(x.initializer)
self.assertAllEqual(self.evaluate(x.value()), [7, 8])
def slices(val, index):
return indexed_slices.IndexedSlices(
values=constant_op.constant(val, dtype=dtypes.float32),
indices=constant_op.constant(index, dtype=dtypes.int32),
dense_shape=constant_op.constant([2], dtype=dtypes.int32))
self.assertAllEqual(evaluate(x.scatter_sub(slices(1., 0))), [6, 8])
self.assertAllEqual(evaluate(x.scatter_add(slices(1., 0))), [7, 8])
self.assertAllEqual(evaluate(x.scatter_max(slices(9., 1))), [7, 9])
self.assertAllEqual(evaluate(x.scatter_min(slices(8., 1))), [7, 8])
self.assertAllEqual(evaluate(x.scatter_mul(slices(2., 1))), [7, 16])
self.assertAllEqual(evaluate(x.scatter_div(slices(2., 1))), [7, 8])
self.assertAllEqual(
evaluate(x.scatter_update(slices(4., 1))), [7, 4])
self.assertAllEqual(
evaluate(x.scatter_nd_sub([[0], [1]], [1., 2.])), [6, 2])
self.assertAllEqual(
evaluate(x.scatter_nd_add([[0], [1]], [1., 2.])), [7, 4])
self.assertAllEqual(
evaluate(x.scatter_nd_update([[0], [1]], [1., 2.])), [1, 2])
@ds_combinations.generate(maybe_distribute)
def test_operator_overloads(self, distribution):
with distribution.scope():
for read_dtype in (dtypes.float32, dtypes.float16):
x = get_var(7., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
with autocast_variable.enable_auto_cast_variables(read_dtype):
self.evaluate(x.initializer)
self.assertAlmostEqual(8, self.evaluate(x + 1))
self.assertAlmostEqual(10, self.evaluate(3 + x))
self.assertAlmostEqual(14, self.evaluate(x + x))
self.assertAlmostEqual(5, self.evaluate(x - 2))
self.assertAlmostEqual(6, self.evaluate(13 - x))
self.assertAlmostEqual(0, self.evaluate(x - x))
self.assertAlmostEqual(14, self.evaluate(x * 2))
self.assertAlmostEqual(21, self.evaluate(3 * x))
self.assertAlmostEqual(49, self.evaluate(x * x))
self.assertAlmostEqual(3.5, self.evaluate(x / 2))
self.assertAlmostEqual(1.5, self.evaluate(10.5 / x))
self.assertAlmostEqual(3, self.evaluate(x // 2))
self.assertAlmostEqual(2, self.evaluate(15 // x))
if read_dtype == dtypes.float32:
# The "mod" operator does not support float16
self.assertAlmostEqual(1, self.evaluate(x % 2))
self.assertAlmostEqual(2, self.evaluate(16 % x))
self.assertTrue(self.evaluate(x < 12))
self.assertTrue(self.evaluate(x <= 12))
self.assertFalse(self.evaluate(x > 12))
self.assertFalse(self.evaluate(x >= 12))
self.assertFalse(self.evaluate(12 < x))
self.assertFalse(self.evaluate(12 <= x))
self.assertTrue(self.evaluate(12 > x))
self.assertTrue(self.evaluate(12 >= x))
self.assertAlmostEqual(343, self.evaluate(pow(x, 3)), places=4)
self.assertAlmostEqual(128, self.evaluate(pow(2, x)), places=4)
self.assertAlmostEqual(-7, self.evaluate(-x))
self.assertAlmostEqual(7, self.evaluate(abs(x)))
x = get_var([7, 8, 9], dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
self.assertEqual(self.evaluate(x[1]), 8)
if tf2.enabled() and context.executing_eagerly():
self.assertAllEqual(x == [7., 8., 10.], [True, True, False])
self.assertAllEqual(x != [7., 8., 10.], [False, False, True])
@ds_combinations.generate(maybe_distribute)
def test_assign(self, distribution):
with distribution.scope():
x = get_var(0., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
# outside of auto cast scope.
v1 = constant_op.constant(3., dtype=dtypes.float32)
v2 = constant_op.constant(3., dtype=dtypes.float16)
def run_and_check():
# Assign float32 values
self.assertAllClose(3., self.evaluate(x.assign(v1)))
self.assertAllClose(3. * 2, self.evaluate(x.assign_add(v1)))
self.assertAllClose(3., self.evaluate(x.assign_sub(v1)))
# Attempt to assign float16 values
with self.assertRaisesRegex(
ValueError,
'conversion requested dtype float32 for Tensor with dtype float16'):
self.evaluate(x.assign(v2))
with self.assertRaisesRegex(
ValueError,
'conversion requested dtype float32 for Tensor with dtype float16'):
self.evaluate(x.assign_add(v2))
with self.assertRaisesRegex(
ValueError,
'conversion requested dtype float32 for Tensor with dtype float16'):
self.evaluate(x.assign_sub(v2))
# Assign Python floats
self.assertAllClose(0., self.evaluate(x.assign(0.)))
self.assertAllClose(3., self.evaluate(x.assign(3.)))
self.assertAllClose(3. * 2, self.evaluate(x.assign_add(3.)))
self.assertAllClose(3., self.evaluate(x.assign_sub(3.)))
# Assign multiple times
# This currently doesn't work in graph mode if a strategy is used
if not ds_context.has_strategy() or context.executing_eagerly():
assign = x.assign(1.)
self.assertAllClose(1., self.evaluate(assign))
self.assertAllClose(0., self.evaluate(assign.assign(0.)))
assign_add = x.assign_add(3.)
self.assertAllClose(3., self.evaluate(assign_add))
self.assertAllClose(3. * 3,
self.evaluate(x.assign_add(3.).assign_add(3.)))
self.assertAllClose(3. * 3, x)
assign_sub = x.assign_sub(3.)
self.assertAllClose(3. * 2, self.evaluate(assign_sub))
self.assertAllClose(0.,
self.evaluate(x.assign_sub(3.).assign_sub(3.)))
# Assign with read_value=False
self.assertIsNone(self.evaluate(x.assign(1., read_value=False)))
self.assertAllClose(1., self.evaluate(x))
self.assertIsNone(self.evaluate(x.assign_add(2., read_value=False)))
self.assertAllClose(3., self.evaluate(x))
self.assertIsNone(self.evaluate(x.assign_sub(3., read_value=False)))
self.assertAllClose(0., self.evaluate(x))
# Use the tf.assign functions instead of the var.assign methods.
self.assertAllClose(0., self.evaluate(state_ops.assign(x, 0.)))
self.assertAllClose(3., self.evaluate(state_ops.assign(x, 3.)))
self.assertAllClose(3. * 2,
self.evaluate(state_ops.assign_add(x, 3.)))
self.assertAllClose(3., self.evaluate(state_ops.assign_sub(x, 3.)))
run_and_check()
# reset x
self.evaluate(x.assign(0.))
# within auto cast scope.
with autocast_variable.enable_auto_cast_variables(dtypes.float16):
# assign still expect float32 value even if in float16 scope
run_and_check()
@ds_combinations.generate(maybe_distribute)
def test_assign_tf_function(self, distribution):
if not context.executing_eagerly():
self.skipTest('Test is not compatible with graph mode')
with distribution.scope():
x = get_var(0., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
@def_function.function
def run_assign():
return x.assign(1.).assign_add(3.).assign_add(3.).assign_sub(2.)
with autocast_variable.enable_auto_cast_variables(dtypes.float16):
self.assertAllClose(5., self.evaluate(run_assign()))
@ds_combinations.generate(maybe_distribute)
def test_op_attribute(self, distribution):
with distribution.scope():
x = get_var(0., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
# Variable.op raises an AttributeError in Eager mode and is an op in graph
# mode. Variable.assign(...).op is None in Eager mode and an op in Graph
# mode or a tf.function. We test this is also true of AutoCastVariable.
if context.executing_eagerly():
with self.assertRaises(AttributeError):
x.op # pylint: disable=pointless-statement
self.assertIsNone(x.assign(1.0).op)
self.assertIsNone(x.assign_add(1.0).op)
self.assertIsNone(x.assign_sub(1.0).op)
else:
self.assertIsNotNone(x.op)
self.assertIsNotNone(x.assign(1.0).op)
self.assertIsNotNone(x.assign_add(1.0).op)
self.assertIsNotNone(x.assign_sub(1.0).op)
@def_function.function
def func():
self.assertIsNotNone(x.assign(1.0).op)
self.assertIsNotNone(x.assign_add(1.0).op)
self.assertIsNotNone(x.assign_sub(1.0).op)
func()
@ds_combinations.generate(maybe_distribute)
def test_tf_function_control_dependencies(self, distribution):
if not context.executing_eagerly():
self.skipTest('Test is not compatible with graph mode')
with distribution.scope():
x = get_var(0., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
@def_function.function
def func():
update = x.assign_add(1.)
with ops.control_dependencies([update]):
x.assign_add(1.)
func()
self.assertAllClose(2., self.evaluate(x))
@ds_combinations.generate(maybe_distribute)
def test_assign_stays_in_true_dtype(self, distribution):
with distribution.scope():
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
# small_val is a value such that 1.0 + small_val == 1.0 in fp16, but not
# in fp32
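# (Numerically: fp16 eps is 2**-10, about 9.77e-4, so small_val is about
# 4.88e-4; 1.0 + small_val rounds back to 1.0 in fp16 but not in fp32.)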
small_val = np.finfo('float16').eps / 2
small_tensor = constant_op.constant(small_val, dtype=dtypes.float32)
with autocast_variable.enable_auto_cast_variables(dtypes.float16):
# Variable should be increased, despite it appearing to be the same
# float16 value.
self.evaluate(x.assign(1. + small_tensor))
self.assertEqual(1., self.evaluate(x.value()))
self.assertEqual(1. + small_val, self.evaluate(x))
self.evaluate(x.assign(1.))
with autocast_variable.enable_auto_cast_variables(dtypes.float16):
self.evaluate(x.assign_add(small_tensor))
self.assertEqual(1., self.evaluate(x.value()))
self.assertEqual(1. + small_val, self.evaluate(x))
def test_thread_local_autocast_dtype(self):
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
with autocast_variable.enable_auto_cast_variables(dtypes.float16):
self.assertEqual(array_ops.identity(x).dtype, dtypes.float16)
# New threads should not see the modified value of the autocast dtype.
var_dtype = None
def f():
nonlocal var_dtype
var_dtype = x._cast_dtype
thread = threading.Thread(target=f)
thread.start()
thread.join()
self.assertEqual(var_dtype, dtypes.float32)
@ds_combinations.generate(maybe_distribute)
def test_checkpoint(self, distribution):
with self.test_session():
with distribution.scope():
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
self.evaluate(x.assign(123.))
checkpoint = trackable_utils.Checkpoint(x=x)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
save_path = checkpoint.save(prefix)
self.evaluate(x.assign(234.))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertEqual(self.evaluate(x), 123.)
@ds_combinations.generate(maybe_distribute)
def test_invalid_wrapped_variable(self, distribution):
with distribution.scope():
# Wrap a non-variable
with self.assertRaisesRegex(ValueError, 'variable must be of type'):
x = constant_op.constant([1.], dtype=dtypes.float32)
autocast_variable.create_autocast_variable(x)
# Wrap a non-floating point variable
with self.assertRaisesRegex(ValueError,
'variable must be a floating point'):
x = get_var(1, dtypes.int32)
autocast_variable.create_autocast_variable(x)
def test_repr(self):
# We do not test with DistributionStrategy because we do not want to rely on
# the exact __repr__ output of a DistributedVariable.
x = get_var(1., dtypes.float32, name='x')
x = autocast_variable.create_autocast_variable(x)
if context.executing_eagerly():
self.assertStartsWith(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float32 "
"dtype_to_cast_to=float32, numpy="
)
with autocast_variable.enable_auto_cast_variables(dtypes.float16):
self.assertStartsWith(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float32 "
"dtype_to_cast_to=float16, numpy="
)
else:
self.assertEqual(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float32 "
"dtype_to_cast_to=float32>"
)
with autocast_variable.enable_auto_cast_variables(dtypes.float16):
self.assertEqual(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float32 "
"dtype_to_cast_to=float16>"
)
def test_repr_distributed(self):
strategy = mirrored_strategy.MirroredStrategy(['/cpu:1', '/cpu:2'])
with strategy.scope():
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
use_policy = getattr(strategy.extended, '_use_var_policy', False)
if use_policy:
self.assertRegex(
repr(x).replace('\n', ' '),
'<AutoCastDistributedVariable dtype=float32 '
'dtype_to_cast_to=float32 '
'inner_variable=DistributedVariable.*>')
else:
self.assertRegex(
repr(x).replace('\n', ' '),
'<AutoCastDistributedVariable dtype=float32 '
'dtype_to_cast_to=float32 '
'inner_variable=MirroredVariable.*>')
@ds_combinations.generate(combinations.combine(
optimizer_class=[
adadelta.Adadelta,
adagrad.Adagrad,
adam.Adam,
adamax.Adamax,
ftrl.Ftrl,
gradient_descent_v2.SGD,
nadam.Nadam,
rmsprop.RMSprop,
gradient_descent_v1.GradientDescentOptimizer
],
use_tf_function=[False, True]))
def test_optimizer(self, optimizer_class, use_tf_function):
if use_tf_function and not context.executing_eagerly():
self.skipTest('Test does not support graph mode with tf.function')
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
y = get_var(1., dtypes.float32)
opt = optimizer_class(learning_rate=1.)
def f():
# Minimize both the AutoCastVariable and the normal tf.Variable. Both
# variables should be updated to the same value.
op = opt.minimize(lambda: x + y, var_list=[x, y])
return None if ops.executing_eagerly_outside_functions() else op
if use_tf_function:
f = def_function.function(f)
if context.executing_eagerly():
f()
else:
op = f()
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
# Assert the AutoCastVariable has changed from its initial value
self.assertNotEqual(self.evaluate(x), 1.)
# Assert AutoCastVariable is updated correctly by comparing it to the normal
# variable
self.assertAlmostEqual(self.evaluate(x), self.evaluate(y))
if optimizer_class in (gradient_descent_v2.SGD,
gradient_descent_v1.GradientDescentOptimizer):
# With SGD, each variable decreases by exactly 1
self.assertEqual(self.evaluate(x), 0)
if __name__ == '__main__':
test.main()
|
master_server.py | #!/usr/bin/env python
#
# Copyright 2013 Tanel Alumae
"""
Reads speech data via websocket requests, sends it to Redis, waits for results from Redis and
forwards to client via websocket
"""
import sys
import logging
import json
import codecs
import os.path
import uuid
import time
import threading
import functools
from Queue import Queue
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import tornado.gen
import tornado.concurrent
import settings
import common
description={}
class Application(tornado.web.Application):
def __init__(self):
settings = dict(
cookie_secret="43oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
template_path=os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates"),
static_path=os.path.join(os.path.dirname(os.path.dirname(__file__)), "static"),
xsrf_cookies=False,
autoescape=None,
)
handlers = [
(r"/", MainHandler),
(r"/client/ws/speech", DecoderSocketHandler),
(r"/client/ws/status", StatusSocketHandler),
(r"/client/dynamic/reference", ReferenceHandler),
(r"/client/dynamic/recognize", HttpChunkedRecognizeHandler),
(r"/worker/ws/speech", WorkerSocketHandler),
(r"/client/static/(.*)", tornado.web.StaticFileHandler, {'path': settings["static_path"]}),
]
tornado.web.Application.__init__(self, handlers, **settings)
self.available_workers = set()
self.status_listeners = set()
self.num_requests_processed = 0
def send_status_update_single(self, ws):
status = dict(num_workers_available=len(self.available_workers), num_requests_processed=self.num_requests_processed, description=description)
ws.write_message(json.dumps(status))
def send_status_update(self):
for ws in self.status_listeners:
self.send_status_update_single(ws)
def save_reference(self, content_id, content):
refs = {}
try:
with open("reference-content.json") as f:
refs = json.load(f)
except (IOError, ValueError):
# no existing reference file or unparsable JSON; start with empty refs
pass
refs[content_id] = content
with open("reference-content.json", "w") as f:
json.dump(refs, f, indent=2)
class MainHandler(tornado.web.RequestHandler):
def get(self):
current_directory = os.path.dirname(os.path.abspath(__file__))
parent_directory = os.path.join(current_directory, os.pardir)
readme = os.path.join(parent_directory, "README.md")
self.render(readme)
def run_async(func):
@functools.wraps(func)
def async_func(*args, **kwargs):
func_hl = threading.Thread(target=func, args=args, kwargs=kwargs)
func_hl.start()
return func_hl
return async_func
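# Usage sketch (mirrors how end_request below drives it): decorate a
# blocking method that takes a callback, then yield it as a gen.Task, e.g.
#   hyp = yield tornado.gen.Task(self.get_final_hyp)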
def content_type_to_caps(content_type):
"""
Converts MIME-style raw audio content type specifier to GStreamer CAPS string
"""
default_attributes= {"rate": 16000, "format" : "S16LE", "channels" : 1, "layout" : "interleaved"}
media_type, _, attr_string = content_type.replace(";", ",").partition(",")
if media_type in ["audio/x-raw", "audio/x-raw-int"]:
media_type = "audio/x-raw"
attributes = default_attributes
for (key,_,value) in [p.partition("=") for p in attr_string.split(",")]:
attributes[key.strip()] = value.strip()
return "%s, %s" % (media_type, ", ".join(["%s=%s" % (key, value) for (key,value) in attributes.iteritems()]))
else:
return content_type
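# Worked example (attribute order may vary, since it is rendered from a
# plain dict):
#   content_type_to_caps("audio/x-raw; rate=44100; channels=2")
#   -> "audio/x-raw, rate=44100, format=S16LE, channels=2, layout=interleaved"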
@tornado.web.stream_request_body
class HttpChunkedRecognizeHandler(tornado.web.RequestHandler):
"""
Provides an HTTP POST/PUT interface supporting chunked transfer requests, similar to that provided by
http://github.com/alumae/ruby-pocketsphinx-server.
"""
def prepare(self):
self.id = str(uuid.uuid4())
self.final_hyp = ""
self.final_result_queue = Queue()
self.user_id = self.request.headers.get("device-id", "none")
self.content_id = self.request.headers.get("content-id", "none")
logging.info("%s: OPEN: user='%s', content='%s'" % (self.id, self.user_id, self.content_id))
self.worker = None
self.error_status = 0
self.error_message = None
try:
self.worker = self.application.available_workers.pop()
self.application.send_status_update()
logging.info("%s: Using worker %s" % (self.id, self.__str__()))
self.worker.set_client_socket(self)
content_type = self.request.headers.get("Content-Type", None)
if content_type:
content_type = content_type_to_caps(content_type)
logging.info("%s: Using content type: %s" % (self.id, content_type))
self.worker.write_message(json.dumps(dict(id=self.id, content_type=content_type, user_id=self.user_id, content_id=self.content_id)))
except KeyError:
logging.warn("%s: No worker available for client request" % self.id)
self.set_status(503)
self.finish("No workers available")
def data_received(self, chunk):
assert self.worker is not None
logging.debug("%s: Forwarding client message of length %d to worker" % (self.id, len(chunk)))
self.worker.write_message(chunk, binary=True)
def post(self, *args, **kwargs):
self.end_request(args, kwargs)
def put(self, *args, **kwargs):
self.end_request(args, kwargs)
@run_async
def get_final_hyp(self, callback=None):
logging.info("%s: Waiting for final result..." % self.id)
callback(self.final_result_queue.get(block=True))
@tornado.web.asynchronous
@tornado.gen.coroutine
def end_request(self, *args, **kwargs):
logging.info("%s: Handling the end of chunked recognize request" % self.id)
assert self.worker is not None
self.worker.write_message("EOS", binary=True)
logging.info("%s: yielding..." % self.id)
hyp = yield tornado.gen.Task(self.get_final_hyp)
if self.error_status == 0:
logging.info("%s: Final hyp: %s" % (self.id, hyp))
response = {"status" : 0, "id": self.id, "hypotheses": [{"utterance" : hyp}]}
self.write(response)
else:
logging.info("%s: Error (status=%d) processing HTTP request: %s" % (self.id, self.error_status, self.error_message))
response = {"status" : self.error_status, "id": self.id, "message": self.error_message}
self.write(response)
self.application.num_requests_processed += 1
self.application.send_status_update()
self.worker.set_client_socket(None)
self.worker.close()
self.finish()
logging.info("Everything done")
def send_event(self, event):
event_str = str(event)
if len(event_str) > 100:
event_str = event_str[:97] + "..."
logging.info("%s: Receiving event %s from worker" % (self.id, event_str))
if event["status"] == 0 and ("result" in event):
try:
if len(event["result"]["hypotheses"]) > 0 and event["result"]["final"]:
if len(self.final_hyp) > 0:
self.final_hyp += " "
self.final_hyp += event["result"]["hypotheses"][0]["transcript"]
except:
e = sys.exc_info()[1]
logging.warn("Failed to extract hypothesis from recognition result: %s" % e)
elif event["status"] != 0:
self.error_status = event["status"]
self.error_message = event.get("message", "")
def close(self):
logging.info("%s: Receiving 'close' from worker" % (self.id))
self.final_result_queue.put(self.final_hyp)
class ReferenceHandler(tornado.web.RequestHandler):
def post(self, *args, **kwargs):
content_id = self.request.headers.get("Content-Id")
if content_id:
content = codecs.decode(self.request.body, "utf-8")
user_id = self.request.headers.get("User-Id", "")
self.application.save_reference(content_id, dict(content=content, user_id=user_id, time=time.strftime("%Y-%m-%dT%H:%M:%S")))
logging.info("Received reference text for content %s and user %s" % (content_id, user_id))
self.set_header('Access-Control-Allow-Origin', '*')
else:
self.set_status(400)
self.finish("No Content-Id specified")
def options(self, *args, **kwargs):
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Methods', 'POST, OPTIONS')
self.set_header('Access-Control-Max-Age', 1000)
# note that '*' is not valid for Access-Control-Allow-Headers
self.set_header('Access-Control-Allow-Headers', 'origin, x-csrftoken, content-type, accept, User-Id, Content-Id')
class StatusSocketHandler(tornado.websocket.WebSocketHandler):
# needed for Tornado 4.0
def check_origin(self, origin):
return True
def open(self):
logging.info("New status listener")
self.application.status_listeners.add(self)
self.application.send_status_update_single(self)
def on_close(self):
logging.info("Status listener left")
self.application.status_listeners.remove(self)
class WorkerSocketHandler(tornado.websocket.WebSocketHandler):
def __init__(self, application, request, **kwargs):
tornado.websocket.WebSocketHandler.__init__(self, application, request, **kwargs)
self.client_socket = None
# needed for Tornado 4.0
def check_origin(self, origin):
return True
def open(self):
self.client_socket = None
self.application.available_workers.add(self)
logging.info("New worker available " + self.__str__())
self.application.send_status_update()
def on_close(self):
logging.info("Worker " + self.__str__() + " leaving")
self.application.available_workers.discard(self)
if self.client_socket:
self.client_socket.close()
self.application.send_status_update()
def on_message(self, message):
event = json.loads(message)
if "description" in event:
global description
description=event["description"]
if self.client_socket is not None:
self.client_socket.send_event(event)
def set_client_socket(self, client_socket):
self.client_socket = client_socket
class DecoderSocketHandler(tornado.websocket.WebSocketHandler):
# needed for Tornado 4.0
def check_origin(self, origin):
return True
def send_event(self, event):
event["id"] = self.id
event_str = str(event)
if len(event_str) > 100:
event_str = event_str[:97] + "..."
logging.info("%s: Sending event %s to client" % (self.id, event_str))
self.write_message(json.dumps(event))
def open(self):
self.id = str(uuid.uuid4())
logging.info("%s: OPEN" % (self.id))
logging.info("%s: Request arguments: %s" % (self.id, " ".join(["%s=\"%s\"" % (a, self.get_argument(a)) for a in self.request.arguments])))
self.user_id = self.get_argument("user-id", "none", True)
self.content_id = self.get_argument("content-id", "none", True)
self.worker = None
try:
self.worker = self.application.available_workers.pop()
self.application.send_status_update()
logging.info("%s: Using worker %s" % (self.id, self.__str__()))
self.worker.set_client_socket(self)
content_type = self.get_argument("content-type", None, True)
if content_type:
logging.info("%s: Using content type: %s" % (self.id, content_type))
self.worker.write_message(json.dumps(dict(id=self.id, content_type=content_type, user_id=self.user_id, content_id=self.content_id)))
except KeyError:
logging.warn("%s: No worker available for client request" % self.id)
event = dict(status=common.STATUS_NOT_AVAILABLE, message="No decoder available, try again later")
self.send_event(event)
self.close()
def on_connection_close(self):
logging.info("%s: Handling on_connection_close()" % self.id)
self.application.num_requests_processed += 1
self.application.send_status_update()
if self.worker:
try:
self.worker.set_client_socket(None)
logging.info("%s: Closing worker connection" % self.id)
self.worker.close()
except:
pass
def on_message(self, message):
assert self.worker is not None
logging.info("%s: Forwarding client message (%s) of length %d to worker" % (self.id, type(message), len(message)))
if isinstance(message, unicode):
self.worker.write_message(message, binary=False)
else:
self.worker.write_message(message, binary=True)
def main():
logging.basicConfig(level=logging.DEBUG, format="%(levelname)8s %(asctime)s %(message)s ")
logging.debug('Starting up server')
from tornado.options import define, options
define("certfile", default="", help="certificate file for secured SSL connection")
define("keyfile", default="", help="key file for secured SSL connection")
tornado.options.parse_command_line()
app = Application()
if options.certfile and options.keyfile:
ssl_options = {
"certfile": options.certfile,
"keyfile": options.keyfile,
}
logging.info("Using SSL for serving requests")
app.listen(options.port, ssl_options=ssl_options)
else:
app.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
base_test.py | # -*- coding: utf-8 -*-
import contextlib
import copy
import datetime
import json
import threading
import elasticsearch
import mock
import pytest
from elasticsearch.exceptions import ElasticsearchException
from elastalert.enhancements import BaseEnhancement
from elastalert.enhancements import DropMatchException
from elastalert.kibana import dashboard_temp
from elastalert.util import dt_to_ts
from elastalert.util import dt_to_unix
from elastalert.util import dt_to_unixms
from elastalert.util import EAException
from elastalert.util import ts_now
from elastalert.util import ts_to_dt
from elastalert.util import unix_to_dt
START_TIMESTAMP = '2014-09-26T12:34:45Z'
END_TIMESTAMP = '2014-09-27T12:34:45Z'
START = ts_to_dt(START_TIMESTAMP)
END = ts_to_dt(END_TIMESTAMP)
def _set_hits(ea_inst, hits):
res = {'hits': {'total': len(hits), 'hits': hits}}
ea_inst.client_es.return_value = res
def generate_hits(timestamps, **kwargs):
hits = []
id_iter = xrange(len(timestamps)).__iter__()
for ts in timestamps:
data = {'_id': 'id' + str(id_iter.next()),
'_source': {'@timestamp': ts},
'_type': 'logs',
'_index': 'idx'}
for key, item in kwargs.iteritems():
data['_source'][key] = item
# emulate process_hits(), add metadata to _source
for field in ['_id', '_type', '_index']:
data['_source'][field] = data[field]
hits.append(data)
return {'hits': {'total': len(hits), 'hits': hits}}
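# Shape of a generated hit (illustrative): metadata fields are mirrored
# into _source, emulating process_hits():
#   {'_id': 'id0', '_type': 'logs', '_index': 'idx',
#    '_source': {'@timestamp': ts, '_id': 'id0', '_type': 'logs',
#                '_index': 'idx', ...plus any **kwargs fields}}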
def assert_alerts(ea_inst, calls):
""" Takes a list of lists of timestamps. Asserts that an alert was called for each list, containing those timestamps. """
assert ea_inst.rules[0]['alert'][0].alert.call_count == len(calls)
for call_num, call_args in enumerate(ea_inst.rules[0]['alert'][0].alert.call_args_list):
assert not any([match['@timestamp'] not in calls[call_num] for match in call_args[0][0]])
assert len(call_args[0][0]) == len(calls[call_num])
def test_starttime(ea):
invalid = ['2014-13-13',
'2014-11-24T30:00:00',
'Not A Timestamp']
for ts in invalid:
with pytest.raises((TypeError, ValueError)):
ts_to_dt(ts)
def test_init_rule(ea):
# Simulate state of a rule just loaded from a file
ea.rules[0]['minimum_starttime'] = datetime.datetime.now()
new_rule = copy.copy(ea.rules[0])
map(new_rule.pop, ['agg_matches', 'current_aggregate_id', 'processed_hits', 'minimum_starttime'])
# Properties are copied from ea.rules[0]
ea.rules[0]['starttime'] = '2014-01-02T00:11:22'
ea.rules[0]['processed_hits'] = ['abcdefg']
new_rule = ea.init_rule(new_rule, False)
for prop in ['starttime', 'agg_matches', 'current_aggregate_id', 'processed_hits', 'minimum_starttime']:
assert new_rule[prop] == ea.rules[0][prop]
# Properties are fresh
new_rule = ea.init_rule(new_rule, True)
new_rule.pop('starttime')
assert 'starttime' not in new_rule
assert new_rule['processed_hits'] == {}
def test_query(ea):
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
ea.current_es.search.assert_called_with(body={'query': {'filtered': {'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}, 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], ignore_unavailable=True, size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_with_fields(ea):
ea.rules[0]['_source_enabled'] = False
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
ea.current_es.search.assert_called_with(body={'query': {'filtered': {'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}, 'sort': [{'@timestamp': {'order': 'asc'}}], 'fields': ['@timestamp']}, index='idx', ignore_unavailable=True, size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_with_unix(ea):
ea.rules[0]['timestamp_type'] = 'unix'
ea.rules[0]['dt_to_ts'] = dt_to_unix
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
start_unix = dt_to_unix(START)
end_unix = dt_to_unix(END)
ea.current_es.search.assert_called_with(body={'query': {'filtered': {'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}}, 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], ignore_unavailable=True, size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_with_unixms(ea):
ea.rules[0]['timestamp_type'] = 'unixms'
ea.rules[0]['dt_to_ts'] = dt_to_unixms
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
start_unix = dt_to_unixms(START)
end_unix = dt_to_unixms(END)
ea.current_es.search.assert_called_with(body={'query': {'filtered': {'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}}, 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], ignore_unavailable=True, size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_no_hits(ea):
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_data.call_count == 0
def test_no_terms_hits(ea):
ea.rules[0]['use_terms_query'] = True
ea.rules[0]['query_key'] = 'QWERTY'
ea.rules[0]['doc_type'] = 'uiop'
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_terms_data.call_count == 0
def test_some_hits(ea):
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
hits_dt = generate_hits([START, END])
ea.current_es.search.return_value = hits
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_data.call_count == 1
ea.rules[0]['type'].add_data.assert_called_with([x['_source'] for x in hits_dt['hits']['hits']])
def test_some_hits_unix(ea):
ea.rules[0]['timestamp_type'] = 'unix'
ea.rules[0]['dt_to_ts'] = dt_to_unix
ea.rules[0]['ts_to_dt'] = unix_to_dt
hits = generate_hits([dt_to_unix(START), dt_to_unix(END)])
hits_dt = generate_hits([START, END])
ea.current_es.search.return_value = copy.deepcopy(hits)
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_data.call_count == 1
ea.rules[0]['type'].add_data.assert_called_with([x['_source'] for x in hits_dt['hits']['hits']])
def _duplicate_hits_generator(timestamps, **kwargs):
"""Generator repeatedly returns identical hits dictionaries
"""
while True:
yield generate_hits(timestamps, **kwargs)
def test_duplicate_timestamps(ea):
ea.current_es.search.side_effect = _duplicate_hits_generator([START_TIMESTAMP] * 3, blah='duplicate')
ea.run_query(ea.rules[0], START, ts_to_dt('2014-01-01T00:00:00Z'))
assert len(ea.rules[0]['type'].add_data.call_args_list[0][0][0]) == 3
assert ea.rules[0]['type'].add_data.call_count == 1
# Run the query again, duplicates will be removed and not added
ea.run_query(ea.rules[0], ts_to_dt('2014-01-01T00:00:00Z'), END)
assert ea.rules[0]['type'].add_data.call_count == 1
def test_match(ea):
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
ea.current_es.search.return_value = hits
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
ea.rules[0]['alert'][0].alert.called_with({'@timestamp': END_TIMESTAMP})
assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_run_rule_calls_garbage_collect(ea):
start_time = '2014-09-26T00:00:00Z'
end_time = '2014-09-26T12:00:00Z'
ea.buffer_time = datetime.timedelta(hours=1)
ea.run_every = datetime.timedelta(hours=1)
with contextlib.nested(mock.patch.object(ea.rules[0]['type'], 'garbage_collect'),
mock.patch.object(ea, 'run_query')) as (mock_gc, mock_get_hits):
ea.run_rule(ea.rules[0], ts_to_dt(end_time), ts_to_dt(start_time))
# Running ElastAlert every hour for 12 hours, we should see self.garbage_collect called 12 times.
assert mock_gc.call_count == 12
# The calls should be spaced 1 hour apart
expected_calls = [ts_to_dt(start_time) + datetime.timedelta(hours=i) for i in range(1, 13)]
for e in expected_calls:
mock_gc.assert_any_call(e)
def run_rule_query_exception(ea, mock_es):
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es_init:
mock_es_init.return_value = mock_es
ea.run_rule(ea.rules[0], END, START)
# Assert neither add_data nor garbage_collect were called
# and that starttime did not change
assert ea.rules[0].get('starttime') == START
assert ea.rules[0]['type'].add_data.call_count == 0
assert ea.rules[0]['type'].garbage_collect.call_count == 0
assert ea.rules[0]['type'].add_count_data.call_count == 0
def test_query_exception(ea):
mock_es = mock.Mock()
mock_es.search.side_effect = ElasticsearchException
run_rule_query_exception(ea, mock_es)
def test_query_exception_count_query(ea):
ea.rules[0]['use_count_query'] = True
ea.rules[0]['doc_type'] = 'blahblahblahblah'
mock_es = mock.Mock()
mock_es.count.side_effect = ElasticsearchException
run_rule_query_exception(ea, mock_es)
def test_match_with_module(ea):
mod = BaseEnhancement(ea.rules[0])
mod.process = mock.Mock()
ea.rules[0]['match_enhancements'] = [mod]
test_match(ea)
mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
def test_match_with_module_with_agg(ea):
mod = BaseEnhancement(ea.rules[0])
mod.process = mock.Mock()
ea.rules[0]['match_enhancements'] = [mod]
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=15)
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
ea.current_es.search.return_value = hits
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert mod.process.call_count == 0
def test_match_with_enhancements_first(ea):
mod = BaseEnhancement(ea.rules[0])
mod.process = mock.Mock()
ea.rules[0]['match_enhancements'] = [mod]
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=15)
ea.rules[0]['run_enhancements_first'] = True
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
ea.current_es.search.return_value = hits
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch.object(ea, 'add_aggregated_alert') as add_alert:
ea.run_rule(ea.rules[0], END, START)
mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
assert add_alert.call_count == 1
# Assert that DropMatchException behaves properly
mod.process = mock.MagicMock(side_effect=DropMatchException)
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch.object(ea, 'add_aggregated_alert') as add_alert:
ea.run_rule(ea.rules[0], END, START)
mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
assert add_alert.call_count == 0
def test_agg_matchtime(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
alerttime1 = dt_to_ts(ts_to_dt(hits_timestamps[0]) + datetime.timedelta(minutes=10))
hits = generate_hits(hits_timestamps)
ea.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client'):
# Aggregate first two, query over full range
ea.rules[0]['aggregate_by_match_time'] = True
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to Elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert call1['alert_time'] == alerttime1
assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
assert not call2['alert_sent']
assert call2['aggregate_id'] == 'ABCD'
assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
assert not call3['alert_sent']
assert 'aggregate_id' not in call3
# First call - Find all pending alerts (only entries without agg_id)
# Second call - Find matches with agg_id == 'ABCD'
# Third call - Find matches with agg_id == 'CDEF'
ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_source': call1},
{'_id': 'CDEF', '_source': call3}]}},
{'hits': {'hits': [{'_id': 'BCDE', '_source': call2}]}},
{'hits': {'total': 0, 'hits': []}}]
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
ea.send_pending_alerts()
# Assert that current_es was refreshed from the aggregate rules
mock_es.assert_called_with(host='', port='')
assert mock_es.call_count == 2
assert_alerts(ea, [hits_timestamps[:2], hits_timestamps[2:]])
call1 = ea.writeback_es.search.call_args_list[7][1]['body']
call2 = ea.writeback_es.search.call_args_list[8][1]['body']
call3 = ea.writeback_es.search.call_args_list[9][1]['body']
call4 = ea.writeback_es.search.call_args_list[10][1]['body']
assert 'alert_time' in call2['filter']['range']
assert call3['query']['query_string']['query'] == 'aggregate_id:ABCD'
assert call4['query']['query_string']['query'] == 'aggregate_id:CDEF'
assert ea.writeback_es.search.call_args_list[9][1]['size'] == 1337
def test_agg_not_matchtime(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
match_time = ts_to_dt('2014-09-26T12:55:00Z')
hits = generate_hits(hits_timestamps)
ea.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch('elastalert.elastalert.ts_now', return_value=match_time):
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to Elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert call1['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
assert not call2['alert_sent']
assert call2['aggregate_id'] == 'ABCD'
assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
assert not call3['alert_sent']
assert call3['aggregate_id'] == 'ABCD'
def test_agg_cron(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
hits = generate_hits(hits_timestamps)
ea.current_es.search.return_value = hits
alerttime1 = dt_to_ts(ts_to_dt('2014-09-26T12:46:00'))
alerttime2 = dt_to_ts(ts_to_dt('2014-09-26T13:04:00'))
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch('elastalert.elastalert.croniter.get_next') as mock_ts:
# Aggregate first two, query over full range
mock_ts.side_effect = [dt_to_unix(ts_to_dt('2014-09-26T12:46:00')), dt_to_unix(ts_to_dt('2014-09-26T13:04:00'))]
ea.rules[0]['aggregation'] = {'schedule': '*/5 * * * *'}
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to Elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert call1['alert_time'] == alerttime1
assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
assert not call2['alert_sent']
assert call2['aggregate_id'] == 'ABCD'
assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
assert call3['alert_time'] == alerttime2
assert not call3['alert_sent']
assert 'aggregate_id' not in call3
def test_agg_no_writeback_connectivity(ea):
""" Tests that if writeback_es throws an exception, the matches will be added to 'agg_matches' and when
run again, that they will be passed again to add_aggregated_alert """
hit1, hit2, hit3 = '2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45'
hits = generate_hits([hit1, hit2, hit3])
ea.current_es.search.return_value = hits
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': hit1},
{'@timestamp': hit2},
{'@timestamp': hit3}]
ea.writeback_es.index.side_effect = elasticsearch.exceptions.ElasticsearchException('Nope')
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch.object(ea, 'find_pending_aggregate_alert', return_value=None):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['agg_matches'] == [{'@timestamp': hit1, 'num_hits': 0, 'num_matches': 3},
{'@timestamp': hit2, 'num_hits': 0, 'num_matches': 3},
{'@timestamp': hit3, 'num_hits': 0, 'num_matches': 3}]
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.add_aggregated_alert = mock.Mock()
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
ea.add_aggregated_alert.assert_any_call({'@timestamp': hit1, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
ea.add_aggregated_alert.assert_any_call({'@timestamp': hit2, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
ea.add_aggregated_alert.assert_any_call({'@timestamp': hit3, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
def test_agg_with_aggregation_key(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:43:45']
match_time = ts_to_dt('2014-09-26T12:45:00Z')
hits = generate_hits(hits_timestamps)
ea.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch('elastalert.elastalert.ts_now', return_value=match_time):
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
# Hit1 and Hit3 should be aggregated together, since they have the same aggregation_key value
ea.rules[0]['type'].matches[0]['key'] = 'Key Value 1'
ea.rules[0]['type'].matches[1]['key'] = 'Key Value 2'
ea.rules[0]['type'].matches[2]['key'] = 'Key Value 1'
ea.rules[0]['aggregation_key'] = 'key'
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to Elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['key'] == 'Key Value 1'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert 'aggregate_key' in call1
assert call1['aggregate_key'] == 'Key Value 1'
assert call1['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
assert call2['match_body']['key'] == 'Key Value 2'
assert not call2['alert_sent']
assert 'aggregate_id' not in call2
assert 'aggregate_key' in call2
assert call2['aggregate_key'] == 'Key Value 2'
assert call2['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
assert call3['match_body']['key'] == 'Key Value 1'
assert not call3['alert_sent']
# Call3 should have its aggregate_id set to call1's _id
# It should also have the same alert_time as call1
assert call3['aggregate_id'] == 'ABCD'
assert 'aggregate_key' in call3
assert call3['aggregate_key'] == 'Key Value 1'
assert call3['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
# First call - Find all pending alerts (only entries without agg_id)
# Second call - Find matches with agg_id == 'ABCD'
# Third call - Find matches with agg_id == 'CDEF'
ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_source': call1},
{'_id': 'CDEF', '_source': call2}]}},
{'hits': {'hits': [{'_id': 'BCDE', '_source': call3}]}},
{'hits': {'total': 0, 'hits': []}}]
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
ea.send_pending_alerts()
# Assert that current_es was refreshed from the aggregate rules
# (note: Mock.called_with() is not an assertion, so check the call count instead)
assert mock_es.call_count == 2
assert_alerts(ea, [[hits_timestamps[0], hits_timestamps[2]], [hits_timestamps[1]]])
call1 = ea.writeback_es.search.call_args_list[7][1]['body']
call2 = ea.writeback_es.search.call_args_list[8][1]['body']
call3 = ea.writeback_es.search.call_args_list[9][1]['body']
call4 = ea.writeback_es.search.call_args_list[10][1]['body']
assert 'alert_time' in call2['filter']['range']
assert call3['query']['query_string']['query'] == 'aggregate_id:ABCD'
assert call4['query']['query_string']['query'] == 'aggregate_id:CDEF'
assert ea.writeback_es.search.call_args_list[9][1]['size'] == 1337
def test_silence(ea):
# Silence test rule for 4 hours
ea.args.rule = 'test_rule.yaml' # Not a real name, just has to be set
ea.args.silence = 'hours=4'
ea.silence()
# Don't alert even with a match
match = [{'@timestamp': '2014-11-17T00:00:00'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 0
# Mock ts_now() to +5 hours, alert on match
match = [{'@timestamp': '2014-11-17T00:00:00'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
with mock.patch('elastalert.elastalert.elasticsearch_client'):
# Converted twice to add tzinfo
mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(hours=5)))
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_compound_query_key(ea):
ea.rules[0]['query_key'] = 'this,that,those'
ea.rules[0]['compound_query_key'] = ['this', 'that', 'those']
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP], this='abc', that=u'☃', those=4)
ea.current_es.search.return_value = hits
ea.run_query(ea.rules[0], START, END)
call_args = ea.rules[0]['type'].add_data.call_args_list[0]
assert 'this,that,those' in call_args[0][0][0]
assert call_args[0][0][0]['this,that,those'] == u'abc, ☃, 4'
def test_silence_query_key(ea):
# Silence test rule for 4 hours
ea.args.rule = 'test_rule.yaml' # Not a real name, just has to be set
ea.args.silence = 'hours=4'
ea.silence('anytest.qlo')
# Don't alert even with a match
match = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
ea.rules[0]['query_key'] = 'username'
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 0
# If there is a new record with a different value for the query_key, we should get an alert
match = [{'@timestamp': '2014-11-17T00:00:01', 'username': 'dpopes'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Mock ts_now() to +5 hours, alert on match
match = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
with mock.patch('elastalert.elastalert.elasticsearch_client'):
# Converted twice to add tzinfo
mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(hours=5)))
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 2
def test_realert(ea):
hits = ['2014-09-26T12:35:%02dZ' % (x) for x in range(60)]  # zero-pad seconds so timestamps stay valid
matches = [{'@timestamp': x} for x in hits]
ea.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.rules[0]['realert'] = datetime.timedelta(seconds=50)
ea.rules[0]['type'].matches = matches
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Doesn't alert again
matches = [{'@timestamp': x} for x in hits]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
ea.rules[0]['type'].matches = matches
assert ea.rules[0]['alert'][0].alert.call_count == 1
# mock ts_now() to past the realert time
matches = [{'@timestamp': hits[0]}]
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
with mock.patch('elastalert.elastalert.elasticsearch_client'):
# mock_ts is converted twice to add tzinfo
mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(minutes=10)))
ea.rules[0]['type'].matches = matches
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 2
def test_realert_with_query_key(ea):
ea.rules[0]['query_key'] = 'username'
ea.rules[0]['realert'] = datetime.timedelta(minutes=10)
# Alert and silence username: qlo
match = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Don't alert again for the same username
match = [{'@timestamp': '2014-11-17T00:05:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Do alert with a different value
match = [{'@timestamp': '2014-11-17T00:05:00', 'username': ''}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 2
# Alert with query_key missing
match = [{'@timestamp': '2014-11-17T00:05:00'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 3
# Still alert with a different value
match = [{'@timestamp': '2014-11-17T00:05:00', 'username': 'ghengis_khan'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 4
def test_realert_with_nested_query_key(ea):
ea.rules[0]['query_key'] = 'user.name'
ea.rules[0]['realert'] = datetime.timedelta(minutes=10)
# Alert and silence username: qlo
match = [{'@timestamp': '2014-11-17T00:00:00', 'user': {'name': 'qlo'}}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Don't alert again for the same username
match = [{'@timestamp': '2014-11-17T00:05:00', 'user': {'name': 'qlo'}}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_count(ea):
ea.rules[0]['use_count_query'] = True
ea.rules[0]['doc_type'] = 'doctype'
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
# Assert that es.count is run against every run_every timeframe between START and END
start = START
query = {'query': {'filtered': {'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}}
while END - start > ea.run_every:
end = start + ea.run_every
query['query']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['lte'] = dt_to_ts(end)
query['query']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['gt'] = dt_to_ts(start)
start = start + ea.run_every
ea.current_es.count.assert_any_call(body=query, doc_type='doctype', index='idx', ignore_unavailable=True)
def run_and_assert_segmented_queries(ea, start, end, segment_size):
with mock.patch.object(ea, 'run_query') as mock_run_query:
ea.run_rule(ea.rules[0], end, start)
original_end, original_start = end, start
for call_args in mock_run_query.call_args_list:
end = min(start + segment_size, original_end)
assert call_args[0][1:3] == (start, end)
start += segment_size
# Assert elastalert_status was created for the entire time range
assert ea.writeback_es.index.call_args_list[-1][1]['body']['starttime'] == dt_to_ts(original_start)
assert ea.writeback_es.index.call_args_list[-1][1]['body']['endtime'] == dt_to_ts(original_end)
def test_query_segmenting(ea):
# buffer_time segments with normal queries
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=53)
mock_es = mock.Mock()
mock_es.search.side_effect = _duplicate_hits_generator([START_TIMESTAMP])
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es_init:
mock_es_init.return_value = mock_es
run_and_assert_segmented_queries(ea, START, END, ea.rules[0]['buffer_time'])
# Assert that num_hits correctly includes the 1 hit per query
assert ea.num_hits == ea.current_es.search.call_count
# run_every segments with count queries
ea.rules[0]['use_count_query'] = True
with mock.patch('elastalert.elastalert.elasticsearch_client'):
run_and_assert_segmented_queries(ea, START, END, ea.run_every)
# run_every segments with terms queries
ea.rules[0].pop('use_count_query')
ea.rules[0]['use_terms_query'] = True
with mock.patch('elastalert.elastalert.elasticsearch_client'):
run_and_assert_segmented_queries(ea, START, END, ea.run_every)
def test_get_starttime(ea):
endtime = '2015-01-01T00:00:00Z'
mock_es = mock.Mock()
mock_es.search.return_value = {'hits': {'hits': [{'_source': {'endtime': endtime}}]}}
ea.writeback_es = mock_es
# 4 days old, will return endtime
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
mock_ts.return_value = ts_to_dt('2015-01-05T00:00:00Z') # 4 days ahead of the endtime
assert ea.get_starttime(ea.rules[0]) == ts_to_dt(endtime)
# 10 days old, will return None
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
mock_ts.return_value = ts_to_dt('2015-01-11T00:00:00Z') # 10 days ahead of the endtime
assert ea.get_starttime(ea.rules[0]) is None
def test_set_starttime(ea):
# standard query, no starttime, no last run
end = ts_to_dt('2014-10-10T10:10:10')
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 1
assert ea.rules[0]['starttime'] == end - ea.buffer_time
# Standard query, no starttime, rule specific buffer_time
ea.rules[0].pop('starttime')
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=37)
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 1
assert ea.rules[0]['starttime'] == end - datetime.timedelta(minutes=37)
ea.rules[0].pop('buffer_time')
# Standard query, no starttime, last run
ea.rules[0].pop('starttime')
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = ts_to_dt('2014-10-10T00:00:00')
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 1
assert ea.rules[0]['starttime'] == ts_to_dt('2014-10-10T00:00:00')
# Standard query, no starttime, last run, assure buffer_time doesn't go past
ea.rules[0].pop('starttime')
ea.rules[0]['buffer_time'] = datetime.timedelta(weeks=1000)
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = ts_to_dt('2014-10-09T00:00:00')
# First call sets minimum_starttime
ea.set_starttime(ea.rules[0], end)
# Second call uses buffer_time, but it goes past minimum
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == ts_to_dt('2014-10-09T00:00:00')
# Standard query, starttime
ea.rules[0].pop('buffer_time')
ea.rules[0].pop('minimum_starttime')
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 0
assert ea.rules[0]['starttime'] == end - ea.buffer_time
# Count query, starttime, no previous endtime
ea.rules[0]['use_count_query'] = True
ea.rules[0]['doc_type'] = 'blah'
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 0
assert ea.rules[0]['starttime'] == end - ea.run_every
# Count query, with previous endtime
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == END
# buffer_time doesn't go past previous endtime
ea.rules[0].pop('use_count_query')
ea.rules[0]['previous_endtime'] = end - ea.buffer_time * 2
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == ea.rules[0]['previous_endtime']
def test_kibana_dashboard(ea):
match = {'@timestamp': '2014-10-11T00:00:00'}
mock_es = mock.Mock()
ea.rules[0]['use_kibana_dashboard'] = 'my dashboard'
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es_init:
mock_es_init.return_value = mock_es
# No dashboard found
mock_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
with pytest.raises(EAException):
ea.use_kibana_link(ea.rules[0], match)
mock_call = mock_es.search.call_args_list[0][1]
assert mock_call['body'] == {'query': {'term': {'_id': 'my dashboard'}}}
# Dashboard found
mock_es.index.return_value = {'_id': 'ABCDEFG'}
mock_es.search.return_value = {'hits': {'hits': [{'_source': {'dashboard': json.dumps(dashboard_temp)}}]}}
url = ea.use_kibana_link(ea.rules[0], match)
assert 'ABCDEFG' in url
db = json.loads(mock_es.index.call_args_list[0][1]['body']['dashboard'])
assert 'anytest' in db['title']
# Query key filtering added
ea.rules[0]['query_key'] = 'foobar'
match['foobar'] = 'baz'
url = ea.use_kibana_link(ea.rules[0], match)
db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard'])
assert db['services']['filter']['list']['1']['field'] == 'foobar'
assert db['services']['filter']['list']['1']['query'] == '"baz"'
# Compound query key
ea.rules[0]['query_key'] = 'foo,bar'
ea.rules[0]['compound_query_key'] = ['foo', 'bar']
match['foo'] = 'cat'
match['bar'] = 'dog'
match['foo,bar'] = 'cat, dog'
url = ea.use_kibana_link(ea.rules[0], match)
db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard'])
found_filters = 0
for filter_id, filter_dict in db['services']['filter']['list'].items():
if (filter_dict['field'] == 'foo' and filter_dict['query'] == '"cat"') or \
(filter_dict['field'] == 'bar' and filter_dict['query'] == '"dog"'):
found_filters += 1
continue
assert found_filters == 2
def test_rule_changes(ea):
ea.rule_hashes = {'rules/rule1.yaml': 'ABC',
'rules/rule2.yaml': 'DEF'}
ea.rules = [ea.init_rule(rule, True) for rule in [{'rule_file': 'rules/rule1.yaml', 'name': 'rule1', 'filter': []},
{'rule_file': 'rules/rule2.yaml', 'name': 'rule2', 'filter': []}]]
ea.rules[1]['processed_hits'] = ['save me']
new_hashes = {'rules/rule1.yaml': 'ABC',
'rules/rule3.yaml': 'XXX',
'rules/rule2.yaml': '!@#$'}
with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
mock_load.side_effect = [{'filter': [], 'name': 'rule2', 'rule_file': 'rules/rule2.yaml'},
{'filter': [], 'name': 'rule3', 'rule_file': 'rules/rule3.yaml'}]
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
# All 3 rules still exist
assert ea.rules[0]['name'] == 'rule1'
assert ea.rules[1]['name'] == 'rule2'
assert ea.rules[1]['processed_hits'] == ['save me']
assert ea.rules[2]['name'] == 'rule3'
# Assert 2 and 3 were reloaded
assert mock_load.call_count == 2
mock_load.assert_any_call('rules/rule2.yaml', ea.conf)
mock_load.assert_any_call('rules/rule3.yaml', ea.conf)
# A new rule with a conflicting name won't load
new_hashes = copy.copy(new_hashes)
new_hashes.update({'rules/rule4.yaml': 'asdf'})
with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
with mock.patch.object(ea, 'send_notification_email') as mock_send:
mock_load.return_value = {'filter': [], 'name': 'rule3', 'new': 'stuff', 'rule_file': 'rules/rule4.yaml'}
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
mock_send.assert_called_once_with(exception=mock.ANY, rule_file='rules/rule4.yaml')
assert len(ea.rules) == 3
assert not any(['new' in rule for rule in ea.rules])
# An old rule which didn't load gets reloaded
new_hashes = copy.copy(new_hashes)
new_hashes['rules/rule4.yaml'] = 'qwerty'
with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
mock_load.return_value = {'filter': [], 'name': 'rule4', 'new': 'stuff', 'rule_file': 'rules/rule4.yaml'}
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
assert len(ea.rules) == 4
def test_strf_index(ea):
""" Test that the get_index function properly generates indexes spanning days """
ea.rules[0]['index'] = 'logstash-%Y.%m.%d'
ea.rules[0]['use_strftime_index'] = True
# Test formatting with times
start = ts_to_dt('2015-01-02T12:34:45Z')
end = ts_to_dt('2015-01-02T16:15:14Z')
assert ea.get_index(ea.rules[0], start, end) == 'logstash-2015.01.02'
end = ts_to_dt('2015-01-03T01:02:03Z')
assert ea.get_index(ea.rules[0], start, end) == 'logstash-2015.01.02,logstash-2015.01.03'
# Test formatting for wildcard
assert ea.get_index(ea.rules[0]) == 'logstash-*'
ea.rules[0]['index'] = 'logstash-%Y.%m'
assert ea.get_index(ea.rules[0]) == 'logstash-*'
ea.rules[0]['index'] = 'logstash-%Y.%m-stuff'
assert ea.get_index(ea.rules[0]) == 'logstash-*-stuff'
def test_count_keys(ea):
ea.rules[0]['timeframe'] = datetime.timedelta(minutes=60)
ea.rules[0]['top_count_keys'] = ['this', 'that']
ea.rules[0]['type'].matches = {'@timestamp': END}
ea.rules[0]['doc_type'] = 'blah'
buckets = [{'aggregations': {'filtered': {'counts': {'buckets': [{'key': 'a', 'doc_count': 10}, {'key': 'b', 'doc_count': 5}]}}}},
{'aggregations': {'filtered': {'counts': {'buckets': [{'key': 'd', 'doc_count': 10}, {'key': 'c', 'doc_count': 12}]}}}}]
ea.current_es.search.side_effect = buckets
counts = ea.get_top_counts(ea.rules[0], START, END, ['this', 'that'])
calls = ea.current_es.search.call_args_list
assert calls[0][1]['search_type'] == 'count'
assert calls[0][1]['body']['aggs']['filtered']['aggs']['counts']['terms'] == {'field': 'this', 'size': 5}
assert counts['top_events_this'] == {'a': 10, 'b': 5}
assert counts['top_events_that'] == {'d': 10, 'c': 12}
def test_exponential_realert(ea):
ea.rules[0]['exponential_realert'] = datetime.timedelta(days=1) # 1 day ~ 10 * 2**13 seconds
ea.rules[0]['realert'] = datetime.timedelta(seconds=10)
until = ts_to_dt('2015-03-24T00:00:00')
ts5s = until + datetime.timedelta(seconds=5)
ts15s = until + datetime.timedelta(seconds=15)
ts1m = until + datetime.timedelta(minutes=1)
ts5m = until + datetime.timedelta(minutes=5)
ts4h = until + datetime.timedelta(hours=4)
test_values = [(ts5s, until, 0), # Exp will increase to 1, 10*2**0 = 10s
(ts15s, until, 0), # Exp will stay at 0, 10*2**0 = 10s
(ts15s, until, 1), # Exp will increase to 2, 10*2**1 = 20s
(ts1m, until, 2), # Exp will decrease to 1, 10*2**2 = 40s
(ts1m, until, 3), # Exp will increase to 4, 10*2**3 = 1m20s
(ts5m, until, 1), # Exp will lower back to 0, 10*2**1 = 20s
(ts4h, until, 9), # Exp will lower back to 0, 10*2**9 = 1h25m
(ts4h, until, 10), # Exp will lower back to 9, 10*2**10 = 2h50m
(ts4h, until, 11)] # Exp will increase to 12, 10*2**11 = 5h
results = (1, 0, 2, 1, 4, 0, 0, 9, 12)
next_res = iter(results)
for args in test_values:
ea.silence_cache[ea.rules[0]['name']] = (args[1], args[2])
next_alert, exponent = ea.next_alert_time(ea.rules[0], ea.rules[0]['name'], args[0])
assert exponent == next(next_res)
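# Reading the expectations above: the silence window is realert * 2**exponent;
# timestamps near `until` push the exponent up, timestamps well past it let the
# exponent decay back toward 0, and exponential_realert (1 day) caps the window.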
def test_stop(ea):
""" The purpose of this test is to make sure that calling ElastAlerter.stop() will break it
out of a ElastAlerter.start() loop. This method exists to provide a mechanism for running
ElastAlert with threads and thus must be tested with threads. mock_loop verifies the loop
is running and will call stop after several iterations. """
# Exit the thread on the fourth iteration
def mock_loop():
for i in range(3):
assert ea.running
yield
ea.stop()
with mock.patch.object(ea, 'sleep_for', return_value=None):
with mock.patch.object(ea, 'run_all_rules') as mock_run:
mock_run.side_effect = mock_loop()
start_thread = threading.Thread(target=ea.start)
# Set as daemon to prevent a failed test from blocking exit
start_thread.daemon = True
start_thread.start()
# Give it a few seconds to run the loop
start_thread.join(5)
assert not ea.running
assert not start_thread.is_alive()
assert mock_run.call_count == 4
def test_notify_email(ea):
mock_smtp = mock.Mock()
ea.rules[0]['notify_email'] = ['foo@foo.foo', 'bar@bar.bar']
with mock.patch('elastalert.elastalert.SMTP') as mock_smtp_f:
mock_smtp_f.return_value = mock_smtp
# Notify_email from rules, array
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[0][0][1]) == set(ea.rules[0]['notify_email'])
# With ea.notify_email
ea.notify_email = ['baz@baz.baz']
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[1][0][1]) == set(['baz@baz.baz'] + ea.rules[0]['notify_email'])
# With ea.notify email but as single string
ea.rules[0]['notify_email'] = 'foo@foo.foo'
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[2][0][1]) == set(['baz@baz.baz', 'foo@foo.foo'])
# None from rule
ea.rules[0].pop('notify_email')
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[3][0][1]) == set(['baz@baz.baz'])
def test_uncaught_exceptions(ea):
e = Exception("Errors yo!")
# With disabling set to false
ea.disable_rules_on_error = False
ea.handle_uncaught_exception(e, ea.rules[0])
assert len(ea.rules) == 1
assert len(ea.disabled_rules) == 0
# With disabling set to true
ea.disable_rules_on_error = True
ea.handle_uncaught_exception(e, ea.rules[0])
assert len(ea.rules) == 0
assert len(ea.disabled_rules) == 1
# Changing the file should re-enable it
ea.rule_hashes = {'rule1': 'abc'}
new_hashes = {'rule1': 'def'}
with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
mock_load.side_effect = [ea.disabled_rules[0]]
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
assert len(ea.rules) == 1
assert len(ea.disabled_rules) == 0
# Notify email is sent
ea.notify_email = 'qlo@example.com'
with mock.patch.object(ea, 'send_notification_email') as mock_email:
ea.handle_uncaught_exception(e, ea.rules[0])
assert mock_email.call_args_list[0][1] == {'exception': e, 'rule': ea.disabled_rules[0]}
def test_get_top_counts_handles_no_hits_returned(ea):
with mock.patch.object(ea, 'get_hits_terms') as mock_hits:
mock_hits.return_value = None
rule = ea.rules[0]
starttime = datetime.datetime.now() - datetime.timedelta(minutes=10)
endtime = datetime.datetime.now()
keys = ['foo']
all_counts = ea.get_top_counts(rule, starttime, endtime, keys)
assert all_counts == {'top_events_foo': {}}
def test_remove_old_events(ea):
now = ts_now()
minute = datetime.timedelta(minutes=1)
ea.rules[0]['processed_hits'] = {'foo': now - minute,
'bar': now - minute * 5,
'baz': now - minute * 15}
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=10)
# With a query delay, only events older than 20 minutes will be removed (none)
ea.rules[0]['query_delay'] = datetime.timedelta(minutes=10)
ea.remove_old_events(ea.rules[0])
assert len(ea.rules[0]['processed_hits']) == 3
# With no query delay, the 15 minute old event will be removed
ea.rules[0].pop('query_delay')
ea.remove_old_events(ea.rules[0])
assert len(ea.rules[0]['processed_hits']) == 2
assert 'baz' not in ea.rules[0]['processed_hits']
|
server.py | import socket
import json
from threading import Thread
from chat.models import *
from django.utils import timezone
import logging
import os
import queue
# Configure logging level and format
logger = logging.getLogger(__name__)
logging.basicConfig(format='[%(asctime)s] %(message)s', datefmt='%m/%d/%Y %H:%M:%S %p')
# Pool of connected clients
client_pool = {}
voice_item_value = {}
voice_client = {}
send_queue = queue.Queue()
class Sender(Thread):
def run(self):
while True:
user, message, type_ = send_queue.get()
send_message(user, message, type_)
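# Usage sketch (illustrative): producers enqueue work instead of calling
# send_message() directly, so every socket write happens on the Sender thread:
# send_queue.put((user, message, 0))  # 0 = chat message, see send_message below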
def send_message(user, message, type_=0):
"""
向指定用户发送消息
:param user: 发给的用户
:param message: 发送的消息
:param type_: 发送类型, 0-聊天消息, 1-被某人添加为好友, 2-被某人删除好友
3-被拉进群聊, 4-语音通话请求, 5-语音通话请求结果
"""
if type_ == 0:
# Send a chat message
if user.id in client_pool:
client_pool[user.id].sendall(wrap_message(message))
else:
# Record the message as unread
unread_message = UnreadMessage(user=user, message=message)
unread_message.save()
elif type_ == 1:
if user.id in client_pool and isinstance(message, User):
# Added as a friend by someone
client_pool[user.id].sendall(json.dumps({
'type': 1,
'data': {
'id': message.id,
'nickname': message.nickname
}
}).encode())
elif type_ == 2:
# Removed as a friend by someone
if user.id in client_pool and isinstance(message, User):
client_pool[user.id].sendall(json.dumps({
'type': 2,
'data': {
'id': message.id,
'nickname': message.nickname
}
}).encode())
elif type_ == 3:
# Added to a group chat by someone
if user.id in client_pool and isinstance(message, Chatroom):
client_pool[user.id].sendall(json.dumps({
'type': 3,
'data': {
'id': message.id,
'name': message.name
}
}).encode())
elif type_ == 4:
# Voice call request
if user.id in client_pool and isinstance(message, User):
client_pool[user.id].sendall(json.dumps({
'type': 4,
'from': {
'id': message.id,
'nickname': message.nickname
}
}).encode())
elif type_ == 5:
# Voice call response
if user.id in client_pool:
client_pool[user.id].sendall(json.dumps({
'type': 5,
'res': message['response'],  # 0 - accept, 1 - decline
'user': message['user']
}).encode())
def wrap_message(message):
"""包装向客户发送的消息"""
return json.dumps({
'type': 0,
'data': {
'from': {
'id': message.from_user.id,
'nickname': message.from_user.nickname
},
'to': message.to,
'content': message.content,
'time': timezone.localtime(message.createTime).isoformat()
}
}).encode()
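# Illustrative counterpart (hypothetical, not used by the server): decoding a
# frame produced by wrap_message(). Field names mirror the dict built above;
# this assumes the buffer holds exactly one complete JSON object, which the
# Receiver class below enforces by accumulating until json.loads succeeds.
def unwrap_chat_frame(raw_bytes):
frame = json.loads(raw_bytes.decode())
if frame.get('type') == 0:
data = frame['data']
return data['from']['nickname'], data['to'], data['content'], data['time']
return None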
def wrap_error_data(mes):
"""包装错误消息"""
return json.dumps({
'mes': mes
}).encode()
class VoiceServer(Thread):
def __init__(self):
super(VoiceServer, self).__init__()
self.port = int(os.environ.get('VOICE_SERVER_PORT'))
@staticmethod
def listener(client, user_id):
wrong_times = 0
to_id = 0
while True:
try:
data = client.recv(1024)
if not data:
raise ConnectionError
to_id = voice_item_value[user_id]
voice_client[to_id].sendall(data)
except (ConnectionError, ConnectionResetError):
try:
voice_client[to_id].close()
voice_client.pop(user_id)
voice_client.pop(voice_item_value[user_id])
voice_item_value.pop(voice_item_value[user_id])
voice_item_value.pop(user_id)
except KeyError:
try:
voice_client.pop(user_id)
except KeyError:
pass
except Exception as e:
logger.error(e)
break
except KeyError:
wrong_times += 1
if wrong_times >= 80:
client.close()
break
pass
except Exception as e:
voice_client.pop(user_id)
voice_client.pop(voice_item_value[user_id])
logger.error(e)
client.close()
break
def run(self):
global voice_item_value
server = socket.socket()
server.bind(('', self.port))
server.listen(5)
logger.warning(f'voice server listening on port {self.port}...')
while True:
client, addr = server.accept()
try:
client.settimeout(3)
data = client.recv(4096)
data = json.loads(data)
client.settimeout(None)
token = Token.objects.get(content=data.get('Authorization'))
voice_client[token.user.id] = client
client.sendall(json.dumps({
'mes': '',
'data': token.user.id
}).encode())
Thread(target=VoiceServer.listener, args=(client, token.user_id)).start()
except json.JSONDecodeError:
client.sendall(wrap_error_data('wrong data type'))
client.close()
except Token.DoesNotExist:
client.sendall(wrap_error_data('wrong token'))
client.close()
class Receiver(Thread):
def __init__(self, client, user_id, *args, **kwargs):
super(Receiver, self).__init__(*args, **kwargs)
self.client = client
self.client.settimeout(int(os.environ.get('RECV_TIME_OUT')))  # receive timeout in seconds
self.user_id = user_id
self.recv_buff = int(os.environ.get('TCP_RECV_BUFF'))  # receive buffer size
self.user = User.objects.get(id=self.user_id)
def recv_message(self):
"""接受一个完整的消息"""
res = b""
while True:
try:
res += self.client.recv(self.recv_buff)
res = json.loads(res)
return res
except json.decoder.JSONDecodeError:
pass
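# Note: a json.JSONDecodeError here means "message not yet complete", so the
# loop keeps reading; a chat message may arrive split across several TCP
# segments. This assumes one JSON document per logical message - two objects
# concatenated in a single buffer would never parse.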
def run(self):
while True:
try:
data = self.recv_message()
if 'type' in data:
# Voice call handling
type_ = data['type']
if type_ == 0:
# Request to establish a voice call
send_message(User.objects.get(id=data['user_id']), self.user, type_=4)
elif type_ == 1:
# Reply to a voice call request
response = int(data['response'])  # 0 - accept, 1 - decline
from_id = int(data['from_id'])
if response == 0:
# Call accepted: map the two peers to each other
voice_item_value[self.user_id] = from_id
voice_item_value[from_id] = self.user_id
# Notify the requesting side
send_message(User.objects.get(id=from_id), {
'response': response,
'user': self.user_id
}, type_=5)
continue
token = Token.objects.get(content=data.get('Authorization'))
to = int(data.get('to'))
message = Message(from_user=token.user, to=to, content=data.get('content'),
createTime=timezone.now())
message.save()
# User ids are less than 100000; larger ids denote chatrooms
if to < 100000:
send_message(User.objects.get(id=to), message)
else:
chatroom = Chatroom.objects.get(id=to)
for user in chatroom.users.all():
# Don't send the group message back to its sender
if user.id == token.user.id:
continue
send_message(user, message)
except json.decoder.JSONDecodeError:
try:
self.client.sendall(wrap_error_data('json decode error'))
except ConnectionAbortedError:
# Client disconnected
self.client.close()
client_pool.pop(self.user_id)
logger.warning(f'User<{self.user_id}> disconnect')
break
except Token.DoesNotExist:
self.client.sendall(wrap_error_data('token error'))
except User.DoesNotExist:
self.client.sendall(wrap_error_data('wrong user id'))
except ValueError:
self.client.sendall(wrap_error_data("wrong destination"))
except (ConnectionError, ConnectionAbortedError, ConnectionResetError):
# Client disconnected
try:
self.client.close()
client_pool.pop(self.user_id)
logger.warning(f'User<{self.user_id}> disconnect')
except Exception:
pass
break
except socket.timeout:
self.client.close()
client_pool.pop(self.user_id)
logger.warning(f'User<{self.user_id}> time out')
except Exception as e:
self.client.close()
logger.error(e)
break
class Server(Thread):
def __init__(self):
super(Server, self).__init__()
self.port = int(os.environ.get('TCP_SERVER_PORT'))
def run(self) -> None:
global client_pool
server = socket.socket()
server.bind(('', self.port))
server.listen(20)
logger.warning(f'bind {self.port}, start listening...')
while True:
client, addr = server.accept()
try:
logging.warning(f'connecting from {addr}')
client.settimeout(3)
data = json.loads(client.recv(4096))
client.settimeout(None)
token = Token.objects.get(content=data.get('Authorization'))
client_pool[token.user.id] = client
client.sendall(json.dumps({"mes": "", "data": token.user.id}).encode())
receiver = Receiver(client, token.user.id)
receiver.start()
logger.warning(f'User<{token.user.id}> connect from {addr}')
except json.decoder.JSONDecodeError:
try:
client.sendall(wrap_error_data('wrong data type'))
client.close()
except Exception as e:
logger.warning(e)
except Token.DoesNotExist:
try:
client.sendall(wrap_error_data('wrong token'))
client.close()
except Exception as e:
logger.warning(e)
except socket.timeout:
try:
logger.warning(f'{addr} time out')
client.sendall(wrap_error_data('time out'))
client.close()
except Exception as e:
logger.warning(e)
except (ConnectionResetError, ConnectionAbortedError, ConnectionError):
try:
client.close()
except Exception as e:
logger.warning(e)
except Exception as e:
logger.warning(e)
s = Server()
s.setDaemon(True)
s.start()
sender = Sender()
sender.setDaemon(True)
sender.start()
voice_server = VoiceServer()
voice_server.setDaemon(True)
voice_server.start()
|
stats_scaled.py | import logging
import multiprocessing
import abc
import math
import re
import nibabel as nib
import numpy as np
import os
import pandas
from kerosene.metrics.gauges import AverageGauge
from samitorch.inputs.patch import Patch, CenterCoordinate
from samitorch.inputs.transformers import ToNumpyArray, ApplyMask, \
ResampleNiftiImageToTemplate, LoadNifti, PadToPatchShape, RemapClassIDs
from samitorch.utils.files import extract_file_paths
from samitorch.utils.slice_builder import SliceBuilder
from torchvision.transforms import transforms
logging.basicConfig(level=logging.INFO)
LABELSFORTESTING = 0
LABELSFORTRAINING = 1
ROIT1 = 2
T1 = 3
T1_1MM = 4
T1_IR = 5
T2_FLAIR = 6
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key=alphanum_key)
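# e.g. natural_sort(['scan10', 'scan2', 'scan1']) -> ['scan1', 'scan2', 'scan10'],
# whereas plain sorted() would give ['scan1', 'scan10', 'scan2']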
class AbstractPreProcessingPipeline(metaclass=abc.ABCMeta):
"""
Define a preprocessing pipeline.
"""
@staticmethod
def _get_image_affine(file):
return nib.load(file).affine
@staticmethod
def _get_image_header(file):
return nib.load(file).header
@abc.abstractmethod
def run(self, **kwargs):
"""
Run the preprocessing pipeline.
Args:
**kwargs: Optional keyword arguments.
"""
raise NotImplementedError
@staticmethod
def chunks(l, n):
return [l[i:i + n] for i in range(0, len(l), n)]
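# e.g. chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]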
def _do_job(self, *args):
raise NotImplementedError
def _dispatch_jobs(self, files, num_jobs):
total = len(files)
chunk_size = max(1, int(math.ceil(total / num_jobs)))  # avoid a zero chunk size when jobs outnumber files
slices = AbstractPreProcessingPipeline.chunks(files, chunk_size)
jobs = []
for slice in slices:
j = multiprocessing.Process(target=self._do_job, args=(slice,))
jobs.append(j)
for j in jobs:
j.start()
def _dispatch_jobs_in_pool(self, files, num_jobs, func):
total = len(files)
chunk_size = max(1, int(math.ceil(total / num_jobs)))
slices = AbstractPreProcessingPipeline.chunks(files, chunk_size)
pool = multiprocessing.Pool(processes=num_jobs)
return pool.map(func, slices)
@staticmethod
def get_patches_from_sample(sample, patch_size, step, keep_foreground_only: bool = True, keep_labels: bool = True):
slices = SliceBuilder(sample.x.shape, patch_size=patch_size, step=step).build_slices()
patches = list()
for slice in slices:
if keep_labels:
center_coordinate = CenterCoordinate(sample.x[tuple(slice)], sample.y[tuple(slice)])
patches.append(Patch(slice, 0, center_coordinate))
else:
patches.append(Patch(slice, 0, None))
if keep_foreground_only:
return np.array(list(filter(lambda patch: patch.center_coordinate.is_foreground, patches)))
else:
return patches
@staticmethod
def get_patches(image, patch_size, step):
slices = SliceBuilder(image.shape, patch_size=patch_size, step=step).build_slices()
patches = list()
for slice in slices:
patches.append(Patch(slice, 0, None))
return patches
@staticmethod
def get_filtered_patches(image, label, patch_size, step):
slices = SliceBuilder(image.shape, patch_size=patch_size, step=step).build_slices()
patches = list()
for slice in slices:
center_coordinate = CenterCoordinate(image[tuple(slice)], label[tuple(slice)])
patches.append(Patch(image[tuple(slice)], 0, center_coordinate))
return list(filter(lambda patch: patch.center_coordinate.is_foreground, patches))
class iSEGPipeline(AbstractPreProcessingPipeline):
LOGGER = logging.getLogger("iSEGPipeline")
PATCH_SIZE = (1, 32, 32, 32)
STEP = (1, 4, 4, 4)
def __init__(self, root_dir):
self._root_dir = root_dir
def run(self):
images_T1 = natural_sort(extract_file_paths(os.path.join(self._root_dir, "T1")))
labels = natural_sort(extract_file_paths(os.path.join(self._root_dir, "label")))
files = np.stack((np.array(images_T1), np.array(labels)), axis=1)
# self._dispatch_jobs(files, 8)
self._do_job(files)
def _do_job(self, files):
images = list()
for file in files:
self.LOGGER.info("Processing file {}".format(file[1]))
label = self._to_numpy_array(file[1])
label = self._remap_class_ids(label)
self.LOGGER.info("Processing file {}".format(file[0]))
t1 = self._to_numpy_array(file[0])
# Extract foreground patches so the per-patch statistics below can read patch.slice
images.append(self._extract_patches(t1, label, self.PATCH_SIZE, self.STEP))
images_np = list()
for image in images:
images_np.append(list(map(lambda patch: patch.slice, image)))
images = [item for sublist in images_np for item in sublist]
dataset_mean = np.mean([np.mean(image) for image in images])
std = np.sqrt(np.array([(((image - dataset_mean) ** 2).mean()) for image in images]).mean())
print("Mean: {}".format(np.mean(dataset_mean)))
print("Std: {}".format(np.mean(np.array(std))))
def _to_numpy_array(self, file):
transform_ = transforms.Compose([ToNumpyArray()])
return transform_(file)
def _remap_class_ids(self, file):
transform_ = transforms.Compose([RemapClassIDs([10, 150, 250], [1, 2, 3])])
return transform_(file)
def _extract_patches(self, image, label, patch_size, step):
transforms_ = transforms.Compose([PadToPatchShape(patch_size=patch_size, step=step)])
transformed_image = transforms_(image)
transformed_label = transforms_(label)
return iSEGPipeline.get_filtered_patches(transformed_image, transformed_label, patch_size, step)
class MRBrainSPipeline(AbstractPreProcessingPipeline):
LOGGER = logging.getLogger("MRBrainSPipeline")
PATCH_SIZE = (1, 32, 32, 32)
STEP = (1, 4, 4, 4)
def __init__(self, root_dir):
self._root_dir = root_dir
def run(self):
source_paths = list()
for subject in sorted(os.listdir(os.path.join(self._root_dir))):
source_paths.append(extract_file_paths(os.path.join(self._root_dir, subject)))
self._dispatch_jobs(source_paths, 8)
# self._do_job(source_paths)
def _do_job(self, files):
images = list()
for file in files:
self.LOGGER.info("Processing file {}".format(file[LABELSFORTESTING]))
label_for_testing = MRBrainSPipeline.resample_to_template(file[LABELSFORTESTING], file[T1_1MM],
interpolation="linear")
label_for_testing = MRBrainSPipeline.to_numpy_array(label_for_testing)
label_for_testing = label_for_testing.transpose((3, 0, 1, 2))
label_for_testing = np.rot90(label_for_testing, axes=(1, -2))
self.LOGGER.info("Processing file {}".format(file[T1]))
t1 = MRBrainSPipeline.resample_to_template(file[T1], file[T1_1MM], interpolation="continuous")
t1 = MRBrainSPipeline.to_numpy_array(t1)
t1 = t1.transpose((3, 0, 1, 2))
t1 = np.rot90(t1, axes=(1, -2))
t1 = MRBrainSPipeline.apply_mask(t1, label_for_testing)
# Extract foreground patches so the per-patch statistics below can read patch.slice
images.append(MRBrainSPipeline.extract_patches(t1, label_for_testing, self.PATCH_SIZE, self.STEP))
images_np = list()
for image in images:
images_np.append(list(map(lambda patch: patch.slice, image)))
patches = [item for sublist in images_np for item in sublist]
dataset_mean = np.mean([np.mean(patch) for patch in patches])
std = np.sqrt(np.array([(((patch - dataset_mean) ** 2).mean()) for patch in patches]).mean())
print("Mean: {}".format(np.mean(dataset_mean)))
print("Std: {}".format(np.mean(std)))
@staticmethod
def pad_to_shape(image, patch_size, step):
transforms_ = transforms.Compose([PadToPatchShape(patch_size=patch_size, step=step)])
return transforms_(image)
@staticmethod
def to_numpy_array(file):
transform_ = transforms.Compose([ToNumpyArray()])
return transform_(file)
@staticmethod
def resample_to_template(file, nifti_template, interpolation):
transforms_ = transforms.Compose([LoadNifti(),
ResampleNiftiImageToTemplate(clip=False,
template=nifti_template,
interpolation=interpolation)])
return transforms_(file)
@staticmethod
def apply_mask(file, mask):
transform_ = transforms.Compose([ApplyMask(mask)])
return transform_(file)
@staticmethod
def extract_patches(image, label, patch_size, step):
transforms_ = transforms.Compose([PadToPatchShape(patch_size=patch_size, step=step)])
transformed_image = transforms_(image)
transformed_label = transforms_(label)
return ABIDEPreprocessingPipeline.get_filtered_patches(transformed_image, transformed_label, patch_size, step)
class ABIDEPreprocessingPipeline(AbstractPreProcessingPipeline):
"""
An ABIDE data pre-processing pipeline. Extract necessary tissues for brain segmentation among other transformations.
"""
LOGGER = logging.getLogger("PreProcessingPipeline")
PATCH_SIZE = (1, 32, 32, 32)
STEP = (1, 4, 4, 4)
def __init__(self, csv_path):
"""
Pre-processing pipeline constructor.
Args:
root_dir: Root directory where all files are located.
"""
self._csv_path = csv_path
self._mean_gauge = AverageGauge()
self._std_gauge = AverageGauge()
self._dataset_mean = 0.0
def run(self, prefix: str = ""):
files = pandas.read_csv(self._csv_path)
images_T1 = np.asarray(files["T1"])
labels = np.asarray(files["labels"])
files = np.stack((np.array(images_T1), np.array(labels)), axis=1)
means = self._dispatch_jobs_in_pool(files, 8, self._find_average)
self._dataset_mean = np.mean(means)
std = self._dispatch_jobs_in_pool(files, 8, self._find_std)
std = np.sqrt(np.mean(std))
print("Dataset mean : {}".format(self._dataset_mean))
print("Dataset std : {}".format(std))
def _find_average(self, files):
for file in files:
self.LOGGER.info("Processing file {}".format(file[1]))
label = self._to_numpy_array(file[1])
self.LOGGER.info("Processing file {}".format(file[0]))
t1 = self._to_numpy_array(file[0])
patches = self._extract_patches(t1, label, self.PATCH_SIZE, self.STEP)
patches = list(map(lambda patch: patch.slice, patches))
for patch in patches:
mean = np.mean(patch)
self._mean_gauge.update(mean)
return self._mean_gauge.compute()
def _find_std(self, files):
for file in files:
self.LOGGER.info("Processing file {}".format(file[1]))
label = self._to_numpy_array(file[1])
self.LOGGER.info("Processing file {}".format(file[0]))
t1 = self._to_numpy_array(file[0])
patches = self._extract_patches(t1, label, self.PATCH_SIZE, self.STEP)
patches = list(map(lambda patch: patch.slice, patches))
for patch in patches:
std = np.array(((patch - self._dataset_mean) ** 2).mean())
self._std_gauge.update(std)
return self._std_gauge.compute()
def _do_job(self, files):
std = np.sqrt(self._std_gauge.compute())
print("Mean: {}".format(np.mean(self._mean_gauge.compute())))
print("Std: {}".format(np.mean(std)))
def _to_numpy_array(self, file):
transform_ = transforms.Compose([ToNumpyArray()])
return transform_(file)
def _extract_patches(self, image, label, patch_size, step):
transforms_ = transforms.Compose([PadToPatchShape(patch_size=patch_size, step=step)])
transformed_image = transforms_(image)
transformed_label = transforms_(label)
return ABIDEPreprocessingPipeline.get_filtered_patches(transformed_image, transformed_label, patch_size, step)
class MultipleDatasetPipeline(AbstractPreProcessingPipeline):
"""
An ABIDE data pre-processing pipeline. Extract necessary tissues for brain segmentation among other transformations.
"""
LOGGER = logging.getLogger("PreProcessingPipeline")
PATCH_SIZE = (1, 32, 32, 32)
STEP = (1, 4, 4, 4)
def __init__(self, root_dirs):
"""
Pre-processing pipeline constructor.
Args:
root_dir: Root directory where all files are located.
"""
self._root_dirs = root_dirs
self._mean_gauge_iSEG = AverageGauge()
self._mean_gauge_MRBrainS = AverageGauge()
self._mean_gauge_ABIDE = AverageGauge()
self._std_gauge_iSEG = AverageGauge()
self._std_gauge_MRBrainS = AverageGauge()
self._std_gauge_ABIDE = AverageGauge()
self._dataset_mean_iSEG = 0.0
self._dataset_mean_MRBrainS = 0.0
self._dataset_mean_ABIDE = 0.0
self._dataset_std_iSEG = 0.0
self._dataset_std_MRBrainS = 0.0
self._dataset_std_ABIDE = 0.0
def run(self, prefix: str = ""):
images_T1 = pandas.read_csv(self._root_dirs["iSEG"])["T1"]
labels = pandas.read_csv(self._root_dirs["iSEG"])["labels"]
files = np.stack((np.array(images_T1), np.array(labels)), axis=1)
self._dataset_mean_iSEG = np.mean(self._dispatch_jobs_in_pool(files, 8, self._get_mean_iseg))
self._dataset_std_iSEG = np.mean(self._dispatch_jobs_in_pool(files, 8, self._get_std_iseg))
images_T1 = pandas.read_csv(self._root_dirs["MRBrainS"])["T1"]
images_T1_1mm = pandas.read_csv(self._root_dirs["MRBrainS"])["T1_1mm"]
labels = pandas.read_csv(self._root_dirs["MRBrainS"])["LabelsForTesting"]
files = np.stack((np.array(images_T1), np.array(images_T1_1mm), np.array(labels)), axis=1)
self._dataset_mean_MRBrainS = np.mean(self._dispatch_jobs_in_pool(files, 8, self._get_mean_mrbrains))
self._dataset_std_MRBrainS = np.mean(self._dispatch_jobs_in_pool(files, 8, self._get_std_mrbrains))
files = pandas.read_csv(self._root_dirs["ABIDE"])
images_T1 = np.asarray(files["T1"])
labels = np.asarray(files["labels"])
files = np.stack((np.array(images_T1), np.array(labels)), axis=1)
self._dataset_mean_ABIDE = np.mean(self._dispatch_jobs_in_pool(files, 8, self._get_mean_abide))
self._dataset_std_ABIDE = np.mean(self._dispatch_jobs_in_pool(files, 8, self._get_std_abide))
print("Triple Dataset mean: {}".format(
np.mean([self._dataset_mean_iSEG, self._dataset_mean_MRBrainS, self._dataset_mean_ABIDE])))
print("Triple Dataset std: {}".format(
np.sqrt(np.mean([self._dataset_std_iSEG, self._dataset_std_MRBrainS, self._dataset_std_ABIDE]))))
print("Dual Dataset mean: {}".format(
np.mean([self._dataset_mean_iSEG, self._dataset_mean_MRBrainS])))
print("Dual Dataset std: {}".format(
np.sqrt(np.mean([self._dataset_std_iSEG, self._dataset_std_MRBrainS]))))
def _get_mean_iseg(self, files):
for file in files:
self.LOGGER.info("Processing file {}".format(file[1]))
label = self._to_numpy_array(file[1])
self.LOGGER.info("Processing file {}".format(file[0]))
t1 = self._to_numpy_array(file[0])
mean = np.mean(t1)
self._mean_gauge_iSEG.update(mean)
return self._mean_gauge_iSEG.compute()
def _get_std_iseg(self, files):
for file in files:
self.LOGGER.info("Processing file {}".format(file[1]))
label = self._to_numpy_array(file[1])
self.LOGGER.info("Processing file {}".format(file[0]))
t1 = self._to_numpy_array(file[0])
std = np.array(((t1 - self._dataset_mean_iSEG) ** 2).mean())
self._std_gauge_iSEG.update(std)
return self._std_gauge_iSEG.compute()
def _get_mean_mrbrains(self, files):
for file in files:
self.LOGGER.info("Processing file {}".format(file[2]))
label_for_testing = MRBrainSPipeline.resample_to_template(file[2], file[1],
interpolation="linear")
label_for_testing = self._to_numpy_array(label_for_testing)
label_for_testing = label_for_testing.transpose((3, 0, 1, 2))
label_for_testing = np.rot90(label_for_testing, axes=(1, -2))
self.LOGGER.info("Processing file {}".format(file[0]))
t1 = MRBrainSPipeline.resample_to_template(file[0], file[1], interpolation="continuous")
t1 = self._to_numpy_array(t1)
t1 = t1.transpose((3, 0, 1, 2))
t1 = np.rot90(t1, axes=(1, -2))
t1 = MRBrainSPipeline.apply_mask(t1, label_for_testing)
mean = np.mean(t1)
self._mean_gauge_MRBrainS.update(mean)
return self._mean_gauge_MRBrainS.compute()
def _get_std_mrbrains(self, files):
for file in files:
self.LOGGER.info("Processing file {}".format(file[2]))
label_for_testing = MRBrainSPipeline.resample_to_template(file[2], file[1],
interpolation="linear")
label_for_testing = self._to_numpy_array(label_for_testing)
label_for_testing = label_for_testing.transpose((3, 0, 1, 2))
label_for_testing = np.rot90(label_for_testing, axes=(1, -2))
self.LOGGER.info("Processing file {}".format(file[1]))
t1 = MRBrainSPipeline.resample_to_template(file[0], file[1], interpolation="continuous")
t1 = self._to_numpy_array(t1)
t1 = t1.transpose((3, 0, 1, 2))
t1 = np.rot90(t1, axes=(1, -2))
t1 = MRBrainSPipeline.apply_mask(t1, label_for_testing)
std = np.array(((t1 - self._dataset_mean_MRBrainS) ** 2).mean())
self._std_gauge_MRBrainS.update(std)
return self._std_gauge_MRBrainS.compute()
def _get_mean_abide(self, files):
for file in files:
self.LOGGER.info("Processing file {}".format(file[1]))
label = self._to_numpy_array(file[1])
self.LOGGER.info("Processing file {}".format(file[0]))
t1 = self._to_numpy_array(file[0])
mean = np.mean(t1)
self._mean_gauge_ABIDE.update(mean)
return self._mean_gauge_ABIDE.compute()
def _get_std_abide(self, files):
for file in files:
self.LOGGER.info("Processing file {}".format(file[1]))
label = self._to_numpy_array(file[1])
self.LOGGER.info("Processing file {}".format(file[0]))
t1 = self._to_numpy_array(file[0])
std = np.array(((t1 - self._dataset_mean_ABIDE) ** 2).mean())
self._std_gauge_ABIDE.update(std)
return self._std_gauge_ABIDE.compute()
def _to_numpy_array(self, file):
transform_ = transforms.Compose([ToNumpyArray()])
return transform_(file)
def _extract_patches(self, image, label, patch_size, step):
transforms_ = transforms.Compose([PadToPatchShape(patch_size=patch_size, step=step)])
transformed_image = transforms_(image)
transformed_label = transforms_(label)
return MultipleDatasetPipeline.get_filtered_patches(transformed_image, transformed_label, patch_size, step)
if __name__ == "__main__":
# iSEGPipeline("/mnt/md0/Data/Direct/iSEG/Training").run()
# MRBrainSPipeline("/mnt/md0/Data/Direct/MRBrainS/DataNii/TrainingData").run()
# ABIDEPreprocessingPipeline("/home/pierre-luc-delisle/ABIDE/5.1/output_abide_images.csv").run()
MultipleDatasetPipeline({"iSEG": "/mnt/md0/Data/iSEG_scaled/Training/output_iseg_images.csv",
"MRBrainS": "/mnt/md0/Data/MRBrainS_scaled/DataNii/TrainingData/output_mrbrains_images.csv",
"ABIDE": "/mnt/md0/Data/ABIDE_scaled/output_abide_images.csv"}).run()
|
testutils.py | #!/usr/bin/python
# Authors:
# 2020: Wolfgang Fahl https://github.com/WolfgangFahl
# 2021: Lin Gao https://github.com/gaol
#
# This test starter borrows lots from https://github.com/rc-dukes/vertx-eventbus-python, thanks to Wolfgang Fahl
#
from datetime import datetime
from hashlib import md5
import json
import os
import time
from subprocess import Popen, PIPE
import tempfile
from threading import Thread, Condition
import requests
STARTER_FAIL_INDICATOR = "Failed to start the TCP EventBus Bridge"
DEFAULT_WAIT_FOR = "Welcome to use EventBus Starter"
JAR_URL_TEMPLATE = "https://github.com/gaol/test-eventbus-bridge/releases/download/%s/test-ebridge-%s-fat.jar"
_FAT_JARS_ = {'1.0.0': '05cd3e187bf516db4685abb15c9bf983',
'1.0.1': 'a5c837430e98357c5ee5e533cf22b5e9'}
class EventBusBridgeStarter:
"""
An Vertx EventBus Bridge Starter, used to start a bridge server for integration testing
"""
def __init__(self, jar_version='1.0.1', port=7000, wait_for=DEFAULT_WAIT_FOR, debug=False, conf=None):
"""
construct me
Args:
jar_version(str): the bridge jar version to use
port(int): the port to listen to, defaults to 7000
wait_for(str): the output string on stderr of the java process to wait for
debug(bool): True if debugging output should be shown else False - default: False
conf: path to a JSON config file, or a dict holding the bridge configuration - default: None
"""
self.port = port
self.wait_for = wait_for
self.process = None
self.started = False
self.debug = debug
self.conf = conf
if jar_version not in _FAT_JARS_:
print("%s is not a known version" % jar_version)
exit(1)
self.jar_version = jar_version
temp_dir = tempfile.gettempdir().lower()
self.jar_file = "%s/test-ebridge-%s-fat.jar" % (temp_dir, jar_version)
self.jar_url = JAR_URL_TEMPLATE % (jar_version, jar_version)
self.failed = False
def start(self):
try:
if not os.path.exists(self.jar_file):
if self.debug:
print("Downloading bridge jar from %s" % self.jar_url)
req = requests.get(self.jar_url)
with open(self.jar_file, 'wb') as f:
f.write(req.content)
with open(self.jar_file, 'rb') as f:
jar_md5 = md5(f.read()).hexdigest()
if _FAT_JARS_[self.jar_version] != jar_md5:
print("%s is not a valid test eventbus bridge jar file" % self.jar_file)
exit(1)
if self.conf is None:
self.process = Popen(['java', '-jar', self.jar_file], stderr=PIPE)
elif type(self.conf) is dict or os.path.exists(self.conf):
# -conf must come after the jar path (arguments before it would be read as JVM flags);
# dict configs are serialized to a JSON string
conf_arg = json.dumps(self.conf) if isinstance(self.conf, dict) else self.conf
self.process = Popen(['java', '-jar', self.jar_file, '-conf', conf_arg], stderr=PIPE)
t = Thread(target=self._handle_output)
t.daemon = True # thread dies with the program
t.start()
# you need to wait for started with expected output
except IOError as e:
print(e)
raise e
def wait_started(self, time_out=30.0, time_step=0.1):
""" wait for the java server to be started
Args:
time_out(float): the timeout in secs after which the wait fails with an Exception
time_step(float): the time step in secs in which the state should be regularly checked
:raise:
:exception: wait timed out
:exception: if bridge failed to start
"""
time_left = time_out
while not self.started and not self.failed and time_left > 0:
time.sleep(time_step)
time_left = time_left - time_step
if time_left <= 0:
raise Exception("wait for start time_out after %.3f secs" % time_out)
if self.failed:
raise Exception("failed to start vertx eventbus bridge")
if self.debug:
print("wait for start successful after %.3f secs" % (time_out - time_left))
def _handle_output(self):
""" handle the output of the java program"""
out = self.process.stderr
for bline in iter(out.readline, b''):
line = bline.decode('utf8')
if self.debug:
print("Java: %s" % line)
if self.wait_for in line:
self.started = True
if STARTER_FAIL_INDICATOR in line:
self.failed = True
self.stop()
break
out.close()
def stop(self):
if self.started:
self.process.kill()
self.process.wait()
self.process = None
self.started = False
def start_async(self, delay=5):
def _start_internal(_delay=delay):
time.sleep(_delay)
self.start()
self.wait_started()
start_thread = Thread(target=_start_internal, args=(delay,))
start_thread.daemon = True
start_thread.start()
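# Typical test usage (a sketch using the defaults above):
#
# starter = EventBusBridgeStarter(jar_version='1.0.1', port=7000, debug=True)
# starter.start()
# starter.wait_started() # blocks until the bridge prints its welcome banner
# try:
# ... run bridge tests against localhost:7000 ...
# finally:
# starter.stop()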
class CountDownLatch:
"""
CountDownLatch can be used for async testing
"""
def __init__(self, count=1):
self.count = count
self.condition = Condition()
def awaits(self, timeout=None, to_count=0):
try:
self.condition.acquire()
start = datetime.now()
while self.count > to_count:
self.condition.wait(None if timeout is None else timeout / 10)  # poll in steps of a tenth of the timeout
if timeout is not None:
spent = (datetime.now() - start).seconds
if spent > timeout:
raise Exception("timeout after waiting for %d seconds!" % spent)
finally:
self.condition.release()
def count_down(self):
try:
self.condition.acquire()
self.count -= 1
            self.condition.notify_all()
finally:
self.condition.release()
def get_count(self):
return self.count
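# A minimal usage sketch for the latch (illustrative comments only;
# `do_work` is a hypothetical task):
#   latch = CountDownLatch(count=2)
#   def worker():
#       do_work()
#       latch.count_down()
#   Thread(target=worker).start()
#   Thread(target=worker).start()
#   latch.awaits(timeout=5)  # returns once both workers counted down, raises on timeout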
|
server.py | #-----------------------------------------------
#Libraries
#-----------------------------------------------
#Libraries used:
#sys - System
#BaseHTTPRequestHandler - A request handler for the HTTP protocol
#HTTPServer - Server implementation for the HTTP protocol
#threading - Concurrent thread implementation
#time - Standard timing library
#RPi.GPIO - Standard Raspberry Pi GPIO library
#EOZ_IP40 - API module for keypad
#RS_Pro_150N - API module for magnetic lock
#Try to import the libraries and exit the program if some are missing
import sys
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
import threading
import time
import RPi.GPIO as GPIO
import EOZ_IP40 as Keypad
import RS_Pro_150N as Lock
except Exception as e:
print(e)
sys.exit(1)
#-----------------------------------------------
#Global Variables
#-----------------------------------------------
#Hardware module objects
Lock1 = Lock.Maglock(lock_open=False) #The object associated with the magnetic lock hardware module
Keypad1 = Keypad.Keypad(key_buffer_en=True) #The object associated with the keypad hardware module
#Passcodes
maincode = [] #The permanent passcode, does not reset after use
tempcode = [] #The temporary passcode, resets after use
#-----------------------------------------------
#Class Definition
#-----------------------------------------------
#N.B.!: Due to time constraints, this demonstrator does not possess end-to-end encryption or device authentication measures. This demonstrator was meant to show
# how the hardware modules could be remotely accessed and does not contain the security features that a real-world implementation of the hardware would require
# to function without security breaches.
class RequestHandler_httpd(BaseHTTPRequestHandler):
#-----------------------------------------------
#Class Variables
#-----------------------------------------------
Request = [] #The HTTP request message received by the server
#-----------------------------------------------
#Class Functions
#-----------------------------------------------
def do_GET(self):
"""The server receive callback method for use by the server thread"""
global maincode, tempcode
#Catch exceptions
try:
#Send message to accessing device
            messagetosend = bytes('Connection Established', "utf-8")
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(messagetosend))
self.end_headers()
self.wfile.write(messagetosend)
#Receive HTTP request from accessing device
self.Request = self.requestline
            #Strip the leading "GET /" and the trailing " HTTP/1.1" from the request line
            self.Request = self.Request[5 : int(len(self.Request)-9)]
#Parse Request
            if self.Request == "favicon.ico":
                pass #ignore the browser's automatic favicon requests
elif self.Request == 'on':
#Activate Lock1
Lock1.activate_lock()
print("Lock activation request received")
return
elif self.Request == 'off':
#Deactivate Lock1
Lock1.deactivate_lock()
print("Lock deactivation request received")
return
else:
#Extract 4-digit code from HTTP request
code = self.Request[int(len(self.Request)-4):]
self.Request = self.Request[:int(len(self.Request)-5)]
#Parse Request and Code
if self.Request == 'setMain':
#Set Permanent Passcode
maincode = []
for char in code:
maincode.append(char)
print("MainPasscode change request received")
return
elif self.Request == 'setTemp':
#Set Temporary Passcode
tempcode = []
for char in code:
tempcode.append(char)
print("TempPasscode change request received")
return
except Exception as e:
print(e)
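#The request paths understood by the handler above (derived from the parsing logic):
# /on - activate the lock
# /off - deactivate the lock
# /setMain/XXXX - set the permanent 4-digit passcode
# /setTemp/XXXX - set the temporary 4-digit passcode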
#-----------------------------------------------
#Functions
#-----------------------------------------------
#Server Functions
#-----------------------------------------------
def start_server():
"""Creates a thread and runs the server on it indefinately"""
#Start the server and run it until the user interupts it
try:
#Define the IP address of the server
        server_address_httpd = ('192.168.0.120',8080) #Static IP of this device; binding to '' instead would listen on all interfaces
#Create server and server thread
httpd = HTTPServer(server_address_httpd, RequestHandler_httpd)
server_thread = threading.Thread(target=httpd.serve_forever)
#Start server
server_thread.start()
print('Starting Server')
except KeyboardInterrupt:
#Exit the system when the user requests
sys.exit(0)
#Main Function
#-----------------------------------------------
def main():
"""Begins the HTTP server and listens for valid inputs from both the server and Keypad1 to control Lock1"""
global maincode, tempcode
#Initialize the server
start_server()
    #Run until the user interrupts the thread
try:
while True:
#Reset the flag that controls automatic locking after a period of time
close_lock_after_delay = False
#Pass if a 4-digit code has not been entered
if len(Keypad1.key_buffer) == 4:
x = Keypad1.fetch_all()
#Compare input code to the permanent and temporary codes
                if (len(maincode) == 4) and (x == maincode):
print("maincode entered correctly")
#Deactivate Lock1
Lock1.deactivate_lock()
#Raise flag
close_lock_after_delay = True
                elif (len(tempcode) == 4) and (x == tempcode):
print("tempcode entered correctly")
#Deactivate Lock1
Lock1.deactivate_lock()
#Raise flag
close_lock_after_delay = True
#Empty the temporary code array, effectively deleting the code
tempcode = []
else:
print("Password incorrect")
            #Check if an error has occurred and more than 4 digits have been entered
elif len(Keypad1.key_buffer) > 4:
print("Error: too many inputs")
                #Remove all characters from the array and discard them
x = Keypad1.fetch_all()
            #Begin countdown to automatically lock after a certain period of time, if flag is raised
            if close_lock_after_delay:
#Sleep for 10 seconds
time.sleep(10)
#Activate Lock1
Lock1.activate_lock()
except KeyboardInterrupt:
#Cleanup the GPIO pins and exit the system when the user requests it
Lock1.cleanup_gpio()
Keypad1.cleanup_gpio()
sys.exit(0)
#Run main method if file is called directly
if __name__ == "__main__":
main() |
util.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, re, json
import platform
import shutil
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urlparse
import urllib
import threading
from i18n import _
base_units = {'ARG':8, 'mARG':5, 'uARG':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
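# e.g. normalize_version("1.9") -> [1, 9]; trailing ".0" groups are stripped,
# so normalize_version("2.0.0") -> [2]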
class NotEnoughFunds(Exception): pass
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Raise this exception to unwind the stack as when an error occurs, but
# unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
is_verbose = False
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.func_name
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
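# Illustrative use:
#   @profiler
#   def load_history(): ...   # hypothetical function
# prints something like "[profiler] load_history 0.0123" to stderr when verbose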
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_path():
path = android_ext_dir() + '/org.electrum_arg.electrum_arg/blockchain_headers'
d = os.path.dirname(path)
if not os.path.exists(d):
os.mkdir(d)
return path
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum-arg'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_path()
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_path(config):
if 'ANDROID_DATA' in os.environ:
return android_headers_path()
else:
return os.path.join(config.path, 'blockchain_headers')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-arg")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-ARG")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-ARG")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
'''Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator'''
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 8, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result.decode('utf8')
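# e.g. format_satoshis(1230000, num_zeros=2) -> u'0.0123'
# (in a locale whose decimal point is '.')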
def timestamp_to_datetime(timestamp):
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
block_explorer_info = {
'BlockExperts': ('http://www.blockexperts.com/arg',
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
def block_explorer(config):
return config.get('block_explorer', 'BlockExperts')
def block_explorer_tuple(config):
return block_explorer_info.get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
import bitcoin
from bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not an argentum address")
return {'address': uri}
u = urlparse.urlparse(uri)
if u.scheme != 'argentum':
raise BaseException("Not an argentum URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urlparse.parse_qs(query)
else:
pq = urlparse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid argentum address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message'].decode('utf8')
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bitcoin.base_decode(out['sig'], None, base=58).encode('hex')
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
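# e.g., assuming <addr> is a valid argentum address and COIN == 100000000:
#   parse_URI('argentum:<addr>?amount=1.5&message=hi')
#   -> {'address': '<addr>', 'amount': 150000000, 'message': u'hi', 'memo': u'hi'}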
def create_URI(addr, amount, message):
import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
if type(message) == unicode:
message = message.encode('utf8')
query.append('message=%s'%urllib.quote(message))
p = urlparse.ParseResult(scheme='argentum', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urlparse.urlunparse(p)
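# e.g. create_URI('<addr>', 150000000, 'hi') -> 'argentum:<addr>?amount=1.5&message=hi'
# (assuming '<addr>' passes bitcoin.is_address; otherwise '' is returned)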
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import __builtin__
builtin_raw_input = __builtin__.raw_input
__builtin__.raw_input = raw_input
def parse_json(message):
n = message.find('\n')
if n==-1:
return None, message
try:
j = json.loads( message[0:n] )
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
import socket
import errno
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = ''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = ''
except:
traceback.print_exc(file=sys.stderr)
data = ''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
self._send(out)
def send_all(self, requests):
out = ''.join(map(lambda x: json.dumps(x) + '\n', requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except socket.error as e:
if e[0] in (errno.EWOULDBLOCK,errno.EAGAIN):
print_error("EAGAIN: retrying")
time.sleep(0.1)
continue
elif e[0] in ['timed out', 'The write operation timed out']:
print_error("socket timeout, retry")
time.sleep(0.1)
continue
else:
traceback.print_exc(file=sys.stdout)
raise e
import Queue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else Queue.Queue()
self.get_queue = get_queue if get_queue else Queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except Queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except Queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def check_www_dir(rdir):
import urllib, urlparse, shutil, os
if not os.path.exists(rdir):
os.mkdir(rdir)
index = os.path.join(rdir, 'index.html')
if not os.path.exists(index):
print_error("copying index.html")
src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
shutil.copy(src, index)
files = [
"https://code.jquery.com/jquery-1.9.1.min.js",
"https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
"https://code.jquery.com/ui/1.10.3/jquery-ui.js",
"https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
]
for URL in files:
path = urlparse.urlsplit(URL).path
filename = os.path.basename(path)
path = os.path.join(rdir, filename)
if not os.path.exists(path):
print_error("downloading ", URL)
urllib.urlretrieve(URL, path)
|
test_notification_manager.py | import sys
import threading
import time
import warnings
from typing import List
import pytest
from napari.utils.notifications import (
Notification,
notification_manager,
show_error,
show_info,
show_warning,
)
# the capsys fixture comes from pytest
# https://docs.pytest.org/en/stable/how-to/capture-stdout-stderr.html
def test_keyboard_interrupt_handler(capsys):
with pytest.raises(SystemExit):
notification_manager.receive_error(
KeyboardInterrupt, KeyboardInterrupt(), None
)
class PurposefulException(Exception):
pass
def test_notification_repr_has_message():
assert "='this is the message'" in repr(
Notification("this is the message")
)
def test_notification_manager_no_gui(monkeypatch):
"""
Direct test of the notification manager.
    This does not test the integration with the gui, but tests that the
    notification manager itself can receive info, warning and error notifications.
"""
try:
from napari._qt.dialogs.qt_notification import NapariQtNotification
monkeypatch.setattr(NapariQtNotification, "DISMISS_AFTER", 0)
except ModuleNotFoundError:
pass
previous_exhook = sys.excepthook
with notification_manager:
notification_manager.records.clear()
# save all of the events that get emitted
store: List[Notification] = []
notification_manager.notification_ready.connect(store.append)
show_info('this is one way of showing an information message')
assert (
len(notification_manager.records) == 1
), notification_manager.records
assert store[-1].type == 'info'
notification_manager.receive_info(
'This is another information message'
)
assert len(notification_manager.records) == 2
assert len(store) == 2
assert store[-1].type == 'info'
# test that exceptions that go through sys.excepthook are catalogued
with pytest.raises(PurposefulException):
raise PurposefulException("this is an exception")
# pytest intercepts the error, so we can manually call sys.excepthook
assert sys.excepthook == notification_manager.receive_error
try:
raise ValueError("a")
except ValueError:
sys.excepthook(*sys.exc_info())
assert len(notification_manager.records) == 3
assert len(store) == 3
assert store[-1].type == 'error'
# test that warnings that go through showwarning are catalogued
# again, pytest intercepts this, so just manually trigger:
assert warnings.showwarning == notification_manager.receive_warning
warnings.showwarning('this is a warning', UserWarning, '', 0)
assert len(notification_manager.records) == 4
assert store[-1].type == 'warning'
show_error('This is an error')
assert len(notification_manager.records) == 5
assert store[-1].type == 'error'
show_warning('This is a warning')
assert len(notification_manager.records) == 6
assert store[-1].type == 'warning'
# make sure we've restored the except hook
assert sys.excepthook == previous_exhook
assert all(isinstance(x, Notification) for x in store)
def test_notification_manager_no_gui_with_threading():
"""
Direct test of the notification manager.
    This does not test the integration with the gui, but tests that
    exceptions and warnings from threads are correctly captured.
"""
def _warn():
time.sleep(0.01)
warnings.showwarning('this is a warning', UserWarning, '', 0)
def _raise():
time.sleep(0.01)
with pytest.raises(PurposefulException):
raise PurposefulException("this is an exception")
previous_threading_exhook = threading.excepthook
with notification_manager:
notification_manager.records.clear()
# save all of the events that get emitted
store: List[Notification] = []
notification_manager.notification_ready.connect(store.append)
# Test exception inside threads
assert (
threading.excepthook == notification_manager.receive_thread_error
)
exception_thread = threading.Thread(target=_raise)
exception_thread.start()
time.sleep(0.02)
try:
raise ValueError("a")
except ValueError:
threading.excepthook(sys.exc_info())
assert len(notification_manager.records) == 1
assert store[-1].type == 'error'
# Test warning inside threads
assert warnings.showwarning == notification_manager.receive_warning
warning_thread = threading.Thread(target=_warn)
warning_thread.start()
for _ in range(100):
time.sleep(0.01)
if (
len(notification_manager.records) == 2
and store[-1].type == 'warning'
):
break
else:
raise AssertionError("Thread notification not received in time")
# make sure we've restored the threading except hook
assert threading.excepthook == previous_threading_exhook
assert all(isinstance(x, Notification) for x in store)
|
collective_ops_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.platform import test
class CollectiveOpsV1(object):
all_reduce = _collective_ops.all_reduce
all_gather = _collective_ops.all_gather
class CollectiveOpsV2(object):
@staticmethod
def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
device_combination = (
combinations.combine(device='CPU', communication='RING', required_gpus=0) +
combinations.combine(
device='GPU', communication=['RING', 'NCCL'], required_gpus=2))
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v1', CollectiveOpsV1),
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
def testGather(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_gather_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_gather_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_2devices():
self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)
def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
dev2 = '/device:%s:2' % device
dev3 = '/device:%s:3' % device
@def_function.function
def run_all_reduce_4devices_same_instance_key():
# Use a common instance key for both groups.
instance_key = 0
# We will create 2 groups each with 2 devices.
group_size = 2
# Group 0 comprises dev0 and dev1.
group0_key = 0
# Group 1 comprises dev2 and dev3.
group1_key = 1
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(1.), group_size, group0_key, instance_key))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(2.), group_size, group0_key, instance_key))
with ops.device(dev2):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(3.), group_size, group1_key, instance_key))
with ops.device(dev3):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(4.), group_size, group1_key, instance_key))
return collectives
results = run_all_reduce_4devices_same_instance_key()
self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)
def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
if communication == 'NCCL':
self.skipTest('b/170672646: it crashes with NCCL and group size one')
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
instance_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with ops.device(dev0):
gathered_tensor = collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testMultipleGroups(self, collective_ops, device, communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
num_elements = 4
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [float(group_key) for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
with ops.device('/{}:{}'.format(device, device_idx)):
input_tensor = constant_op.constant(input_value)
collectives.append(
collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[float(group_key) * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testAbortGroupParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
def testAbortInstanceParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# First perform a normal all-reduce to complete the group resolution.
def_function.function(collective_fn)()
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
context._reset_context() # pylint: disable=protected-access
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def_function.function(collective_fn)()
def testAbortCommunication(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal collective to finish resolution.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Reset the context in order to reset the collective executor.
t.join()
_setup_context()
def_function.function(collective_fn)()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class TimeoutTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testTimeout(self, collective_op, device, communication):
timeout = 1.5
@def_function.function
def run(group_size, reported_group_size=None):
group_key = 20
instance_key = 30
tensor = [1., 2., 3., 4.]
results = []
if reported_group_size is None:
reported_group_size = group_size
for i in range(group_size):
with ops.device('/{}:{}'.format(device, i)):
input_data = constant_op.constant(tensor)
result = collective_op(
input_data,
group_size=reported_group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
results.append(result)
return results
run(2, 2)
start_time = time.time()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
run(1, 2)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
def testParamResolutionAfterTimeout(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
    # This timeout comes from param resolution.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev1):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def testExecutionAfterTimeout(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# Run a normal all-reduce to complete param resolution.
run()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev1):
# No timeout.
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def _setup_context():
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', 4)
context.ensure_initialized()
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
application_runners.py | from __future__ import print_function
import sys
import os
import uuid
import shlex
import threading
import shutil
import subprocess
import logging
import inspect
import runpy
import future.utils as utils
import flask
import requests
from dash.testing.errors import (
NoAppFoundError,
TestingTimeoutError,
ServerCloseError,
)
import dash.testing.wait as wait
logger = logging.getLogger(__name__)
def import_app(app_file, application_name="app"):
"""Import a dash application from a module. The import path is in dot
notation to the module. The variable named app will be returned.
:Example:
>>> app = import_app("my_app.app")
Will import the application in module `app` of the package `my_app`.
:param app_file: Path to the app (dot-separated).
:type app_file: str
:param application_name: The name of the dash application instance.
:raise: dash_tests.errors.NoAppFoundError
:return: App from module.
:rtype: dash.Dash
"""
try:
app_module = runpy.run_module(app_file)
app = app_module[application_name]
except KeyError:
logger.exception("the app name cannot be found")
raise NoAppFoundError(
"No dash `app` instance was found in {}".format(app_file)
)
return app
class BaseDashRunner(object):
"""Base context manager class for running applications."""
def __init__(self, keep_open, stop_timeout):
self.port = 8050
self.started = None
self.keep_open = keep_open
self.stop_timeout = stop_timeout
self._tmp_app_path = None
def start(self, *args, **kwargs):
raise NotImplementedError # pragma: no cover
def stop(self):
raise NotImplementedError # pragma: no cover
@staticmethod
def accessible(url):
try:
requests.get(url)
except requests.exceptions.RequestException:
return False
return True
def __call__(self, *args, **kwargs):
return self.start(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, traceback):
if self.started and not self.keep_open:
try:
logger.info("killing the app runner")
self.stop()
except TestingTimeoutError:
raise ServerCloseError(
"Cannot stop server within {}s timeout".format(
self.stop_timeout
)
)
@property
def url(self):
"""The default server url."""
return "http://localhost:{}".format(self.port)
@property
def is_windows(self):
return sys.platform == "win32"
@property
def tmp_app_path(self):
return self._tmp_app_path
class ThreadedRunner(BaseDashRunner):
"""Runs a dash application in a thread.
This is the default flavor to use in dash integration tests.
"""
def __init__(self, keep_open=False, stop_timeout=3):
super(ThreadedRunner, self).__init__(
keep_open=keep_open, stop_timeout=stop_timeout
)
self.stop_route = "/_stop-{}".format(uuid.uuid4().hex)
self.thread = None
@staticmethod
def _stop_server():
# https://werkzeug.palletsprojects.com/en/0.15.x/serving/#shutting-down-the-server
stopper = flask.request.environ.get("werkzeug.server.shutdown")
if stopper is None:
raise RuntimeError("Not running with the Werkzeug Server")
stopper()
return "Flask server is shutting down"
# pylint: disable=arguments-differ,C0330
def start(self, app, **kwargs):
"""Start the app server in threading flavor."""
app.server.add_url_rule(
self.stop_route, self.stop_route, self._stop_server
)
def _handle_error():
self._stop_server()
app.server.errorhandler(500)(_handle_error)
def run():
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
if "port" not in kwargs:
kwargs["port"] = self.port
else:
self.port = kwargs["port"]
app.run_server(threaded=True, **kwargs)
self.thread = threading.Thread(target=run)
self.thread.daemon = True
try:
self.thread.start()
        except RuntimeError:  # a thread can only be started once
logger.exception("threaded server failed to start")
self.started = False
self.started = self.thread.is_alive()
# wait until server is able to answer http request
wait.until(lambda: self.accessible(self.url), timeout=1)
def stop(self):
requests.get("{}{}".format(self.url, self.stop_route))
wait.until_not(self.thread.is_alive, self.stop_timeout)
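# A minimal usage sketch (illustrative; `app` stands for any dash.Dash instance):
#   with ThreadedRunner() as runner:
#       runner.start(app, port=8050)
#       # ... drive the app through runner.url ...
# the server is stopped automatically when the context exits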
class ProcessRunner(BaseDashRunner):
"""Runs a dash application in a waitress-serve subprocess.
This flavor is closer to production environment but slower.
"""
def __init__(self, keep_open=False, stop_timeout=3):
super(ProcessRunner, self).__init__(
keep_open=keep_open, stop_timeout=stop_timeout
)
self.proc = None
# pylint: disable=arguments-differ
def start(
self,
app_module=None,
application_name="app",
raw_command=None,
port=8050,
start_timeout=3,
):
"""Start the server with waitress-serve in process flavor."""
        if not (app_module or raw_command):  # need to set at least one
logging.error(
"the process runner needs to start with"
" at least one valid command"
)
return
self.port = port
args = shlex.split(
raw_command
if raw_command
else "waitress-serve --listen=0.0.0.0:{} {}:{}.server".format(
port, app_module, application_name
),
posix=not self.is_windows,
)
logger.debug("start dash process with %s", args)
try:
self.proc = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# wait until server is able to answer http request
wait.until(
lambda: self.accessible(self.url), timeout=start_timeout
)
except (OSError, ValueError):
logger.exception("process server has encountered an error")
self.started = False
self.stop()
return
self.started = True
def stop(self):
if self.proc:
try:
self.proc.terminate()
if utils.PY3:
# pylint:disable=no-member
_except = subprocess.TimeoutExpired
# pylint: disable=unexpected-keyword-arg
self.proc.communicate(timeout=self.stop_timeout)
else:
_except = OSError
self.proc.communicate()
except _except:
logger.exception(
"subprocess terminate not success, trying to kill "
"the subprocess in a safe manner"
)
self.proc.kill()
self.proc.communicate()
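# e.g. ProcessRunner().start(app_module="my_app.app") runs the equivalent of
# "waitress-serve --listen=0.0.0.0:8050 my_app.app:app.server" in a subprocess
# ("my_app.app" is a hypothetical module path)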
class RRunner(ProcessRunner):
def __init__(self, keep_open=False, stop_timeout=3):
super(RRunner, self).__init__(
keep_open=keep_open, stop_timeout=stop_timeout
)
self.proc = None
# pylint: disable=arguments-differ
def start(self, app, start_timeout=2, cwd=None):
"""Start the server with subprocess and Rscript."""
if os.path.isfile(app) and os.path.exists(app):
# app is already a file in a dir - use that as cwd
if not cwd:
cwd = os.path.dirname(app)
logger.info("RRunner inferred cwd from app path: %s", cwd)
else:
            # app is a string chunk; we make a temporary folder to store app.R
            # and its relevant assets
self._tmp_app_path = os.path.join(
"/tmp" if not self.is_windows else os.getenv("TEMP"),
uuid.uuid4().hex,
)
try:
os.mkdir(self.tmp_app_path)
except OSError:
logger.exception(
"cannot make temporary folder %s", self.tmp_app_path
)
path = os.path.join(self.tmp_app_path, "app.R")
logger.info("RRunner start => app is R code chunk")
logger.info("make a temporary R file for execution => %s", path)
logger.debug("content of the dashR app")
logger.debug("%s", app)
with open(path, "w") as fp:
fp.write(app)
app = path
# try to find the path to the calling script to use as cwd
if not cwd:
for entry in inspect.stack():
if "/dash/testing/" not in entry[1].replace("\\", "/"):
cwd = os.path.dirname(os.path.realpath(entry[1]))
logger.warning("get cwd from inspect => %s", cwd)
break
if cwd:
logger.info(
"RRunner inferred cwd from the Python call stack: %s", cwd
)
else:
logger.warning(
"RRunner found no cwd in the Python call stack. "
"You may wish to specify an explicit working directory "
"using something like: "
"dashr.run_server(app, cwd=os.path.dirname(__file__))"
)
# try copying all valid sub folders (i.e. assets) in cwd to tmp
# note that the R assets folder name can be any valid folder name
assets = [
os.path.join(cwd, _)
for _ in os.listdir(cwd)
if not _.startswith("__")
and os.path.isdir(os.path.join(cwd, _))
]
for asset in assets:
target = os.path.join(
self.tmp_app_path, os.path.basename(asset)
)
if os.path.exists(target):
logger.debug("delete existing target %s", target)
shutil.rmtree(target)
logger.debug("copying %s => %s", asset, self.tmp_app_path)
shutil.copytree(asset, target)
logger.debug("copied with %s", os.listdir(target))
logger.info("Run dashR app with Rscript => %s", app)
args = shlex.split(
"Rscript -e 'source(\"{}\")'".format(os.path.realpath(app)),
posix=not self.is_windows,
)
logger.debug("start dash process with %s", args)
try:
self.proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.tmp_app_path if self.tmp_app_path else cwd,
)
# wait until server is able to answer http request
wait.until(
lambda: self.accessible(self.url), timeout=start_timeout
)
except (OSError, ValueError):
logger.exception("process server has encountered an error")
self.started = False
return
self.started = True
|
forward_enumerator.py | from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import MolFromSmiles as Mol
from rdkit.Chem import MolToSmiles as Smiles
from rdkit.Chem.AllChem import ReactionFromSmarts as Rxn
#from rdkit.Chem.FragmentMatcher import FragmentMatcher
#from scripts.retrosynAnalysis.utils import reactant_frags_generator
#from scripts.retrosynAnalysis.utils import working_dir_setting
#from scripts.retrosynAnalysis.utils import timeout, TimeoutError
from multiprocessing import Lock, Process, Queue, current_process
from datetime import datetime
import queue # imported for using queue.Empty exception
import pickle
import shutil
import json
import sys, time
import os
import random
from copy import copy, deepcopy
#from bisect import bisect_left
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
class SynthesisTree:
'''
    Tree structure representing a synthesis route toward the given target SMILES.
'''
def __init__(self, target_smi):
self.target = target_smi
self.tree = [[target_smi]]
self.notFinished = [target_smi]
self.lastRxnIdx = None
self.lastPrecursors = None
self.lastRxnLoc = None
def getCopiedTree(self):
#new_tree = SynthesisTree(self.target)
#new_tree.tree = [copy(pair) for pair in self.tree]
#new_tree.notFinished = copy(self.notFinished)
#new_tree.lastRxnIdx = copy(self.lastRxnIdx)
#new_tree.lastPrecursors = copy(self.lastPrecursors)
#new_tree.lastRxnLoc = copy(self.lastRxnLoc)
#return new_tree
return deepcopy(self)
#return copy.copy(self)
def getNumNotFin(self):
return len(self.notFinished)
def getNumRxn(self):
        return len(self.tree) - 1
def getTarget(self):
return self.target
def getTree(self):
return self.tree
def getNotFin(self):
return self.notFinished
def getLastRxnInform(self):
return self.lastRxnIdx, self.lastPrecursors
def getLastRxnLoc(self):
return self.lastRxnLoc
def getNumOthers(self):
return self.numOthers
def setLastRxnInform(self, idx:int, result:list, rxn_position:int):
self.lastRxnIdx = idx
self.lastPrecursors = result
self.lastRxnLoc = rxn_position
def removeNotFinLoc(self, loc_removed:int):
del self.notFinished[loc_removed]
def removeNotFinElem(self, elem_removed):
self.notFinished.remove(elem_removed)
def insertList(self, loc, l):
#self.tree.append(copy.deepcopy(self.tree[-1]))
self.tree.append(copy(self.tree[-1]))
last = self.tree[-1]
if len(last) > 1:
del last[-1]
del last[loc]
for idx, elem in enumerate(l):
last.insert(loc+idx, elem)
def insertToNotFinished(self, loc, l):
del self.notFinished[loc]
self.numOthers = len(self.notFinished)
for idx, elem in enumerate(l):
self.notFinished.insert(loc+idx, elem)
def getExpandedTrees(self, rxn_position, rxn_results):
'''
        rxn_position is an index into the notFinished list (not into the tree levels).
        Each expanded tree copies the last level, replaces the reacting molecule
        with the reaction result, and appends a [loc, rxn_idx] record to that level.
'''
expanded_trees =[]
elem = self.notFinished[rxn_position]
loc = self.tree[-1].index(elem)
for rxn_idx, result in rxn_results:
copied_tree = self.getCopiedTree()
copied_tree.insertList(loc, result)
copied_tree.tree[-1].append([loc, rxn_idx])
copied_tree.insertToNotFinished(rxn_position, result)
copied_tree.setLastRxnInform(rxn_idx, result, rxn_position)
expanded_trees.append(copied_tree)
return expanded_trees
def updateRbagInform(self, classified_reactant_bag:list, diff:int):
'''
        This method updates the R_bag information of the synthesis tree and returns the boolean test result.
        If the test cannot be decided at the current level, this method returns None.
        If the result is False, the tree may be left partially updated, since completing the update is unnecessary.
'''
exit = False
precursor_Rbag_check = []
num_others = self.getNumOthers()
limit_numb = diff - num_others
if limit_numb < 0:
return False
rxn_idx, last_precursors = self.getLastRxnInform()
target_Rbag = classified_reactant_bag[rxn_idx]
for prec in last_precursors:
check = prec in target_Rbag
precursor_Rbag_check.append(check)
if precursor_Rbag_check.count(False) > limit_numb:
exit = True
break
if check:
self.removeNotFinElem(prec)
if exit:
return False
elif precursor_Rbag_check.count(True) == len(last_precursors) and num_others == 0:
return True
else:
return None
# Forward enumeration
def duplicate_remove(list_of_list):
if list_of_list == []:
return []
result = []
for s in list_of_list:
if not s[1] in result and not s[1][::-1] in result:
result.append(s)
return result
"""
def rxn_result_dup_remove(list_of_smi):
'''
list_of_list: list of sorted lists
'''
if len(list_of_smi)==0:
return None
ret_list = [list_of_smi[0]]
for l in list_of_smi[1:]:
if not l in list_of_smi and not None in l:
ret_list.append(l)
return ret_list
"""
def onestep_by_reactions(
reactant_pair_in_mol: list,
rxn_objs: list,
first: bool,
only_regio_not_problematic: bool,
r1_is_sm: bool,
r2_is_sm: bool
):
'''
Args:
reactant_pair_in_mol: A pair of mol objects of molecules to which the reaction will be applied.
rxn_objs: List of reaction objects.
        first: Whether this is the first reaction step.
        only_regio_not_problematic: Whether to keep only reactions without regio-selectivity ambiguity.
        r1_is_sm: Whether the first reactant is a starting material.
        r2_is_sm: Whether the second reactant is a starting material.
Returns:
list_of_products: List of generated products from both uni and bi.
'''
list_of_products = []
indice_list = list(range(len(rxn_objs)))
    max_products = 1  # keep at most one successful reaction per call
    random.shuffle(indice_list)
    for rxn_idx in indice_list:
        if len(list_of_products) >= max_products:
            break
        rxn = rxn_objs[rxn_idx]
        if rxn is None:
            # Template failed to parse in forward_enumerator; skip it.
            continue
if rxn.GetNumReactantTemplates() == 1:
# 1. unitary reactions
for idx, mol in enumerate(reactant_pair_in_mol):
if idx == 0:
if not first and r1_is_sm:
continue
if idx == 1:
if not first and r2_is_sm:
continue
                try:
                    rxn_results = rxn.RunReactants([mol])
                except Exception:
                    continue
                rxn_results = list(set([Smiles(prod) for pair in rxn_results for prod in pair]))
                if not rxn_results:
                    continue
if only_regio_not_problematic:
if len(rxn_results) > 1: # about regio-selectivity
continue
for p_mol in rxn_results:
                    # record shape: [product smiles, rxn idx, 'uni', reactant idx]
                    to_add = [p_mol, rxn_idx, 'uni', idx]
list_of_products.append(to_add)
elif rxn.GetNumReactantTemplates() == 2:
# 2. binary reactions
            try:
                rxn_results = rxn.RunReactants(reactant_pair_in_mol)
            except Exception:
                continue
            rxn_results = list(set([Smiles(prod) for pair in rxn_results for prod in pair]))
            if not rxn_results:
                continue
if only_regio_not_problematic:
if len(rxn_results) > 1: # about regio-selectivity
continue
for p_mol in rxn_results:
                # record shape: [product smiles, rxn idx, 'bin', None]
                to_add = [p_mol, rxn_idx, 'bin', None]
list_of_products.append(to_add)
else:
continue
return list_of_products
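# --- Usage sketch (illustrative; the SMARTS below is a hypothetical
# amide-coupling template, and Rxn/Mol are the module-level RDKit aliases
# used elsewhere in this file) ---
def _onestep_sketch():
    rxn_objs = [Rxn('[C:1](=[O:2])[OH].[N:3]>>[C:1](=[O:2])[N:3]')]
    pair = [Mol('CC(=O)O'), Mol('CCN')]
    # Returns records of shape [product smiles, rxn idx, 'uni'/'bin', ...].
    return onestep_by_reactions(pair, rxn_objs, first=True,
                                only_regio_not_problematic=True,
                                r1_is_sm=True, r2_is_sm=True)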
def first_reaction(random_pairs, rxn_objs, only_regio_not_problematic):
syn_trees = []
for pair in random_pairs:
if pair[0][0] == pair[1][0]:
continue
pair_in_mol = [Mol(pair[0][0]), Mol(pair[1][0])]
reaction_result = onestep_by_reactions(pair_in_mol, rxn_objs, first=True,
only_regio_not_problematic=only_regio_not_problematic, r1_is_sm=True, r2_is_sm=True)
if reaction_result == []:
continue
pair = [pair[0][0], pair[1][0]]
for result in reaction_result:
if result[2] == 'uni':
result[3] = [pair[result[3]]]
elif result[2] == 'bin':
result[3] = pair
syn_trees.append(result)
return syn_trees
def further_reaction(random_pairs, rxn_objs, only_regio_not_problematic, r1_is_sm, r2_is_sm):
syn_trees = []
for pair in random_pairs:
if pair[0][0] == pair[1][0]:
continue
pair_in_mol = [Mol(pair[0][0]), Mol(pair[1][0])]
reaction_result = onestep_by_reactions(pair_in_mol, rxn_objs, first=False,
only_regio_not_problematic=only_regio_not_problematic, r1_is_sm=r1_is_sm, r2_is_sm=r2_is_sm)
if reaction_result == []:
continue
if r1_is_sm == True:
pair[0] = pair[0][0]
if r2_is_sm == True:
pair[1] = pair[1][0]
for result in reaction_result:
if result[2] == 'uni':
                # In further_reaction, a unimolecular reaction is never applied to a starting material.
result[3] = pair[result[3]]
elif result[2] == 'bin':
result[3] = pair
syn_trees.append(result)
return syn_trees
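# Data-shape note (added for clarity): each element of random_pairs is
# [r1, r2]. A starting material arrives wrapped as a one-element list,
# e.g. ['CCO'], while an intermediate arrives as a full result record whose
# first element is its SMILES; both cases are read with pair[i][0]. That is
# also why further_reaction unwraps a starting material back to the bare
# SMILES before storing it as the precursor entry (result[3]).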
# TODO (21.08.12): revisit first_reaction and further_reaction as well;
# see the memo for the data structure.
def forward_enumerator(random_pairs:list, rxn_templates:list, only_regio_not_problematic,
r1_is_sm:bool, r2_is_sm:bool):
rxn_objects = []
for rxn in rxn_templates:
        try:
            rxn_objects.append(Rxn(rxn))
        except Exception:
            # Keep positional alignment with rxn_templates; unparsable
            # templates become None and are skipped downstream.
            rxn_objects.append(None)
if r1_is_sm and r2_is_sm:
return first_reaction(random_pairs, rxn_objects,
only_regio_not_problematic=only_regio_not_problematic)
else:
return further_reaction(random_pairs, rxn_objects,
only_regio_not_problematic=only_regio_not_problematic,\
r1_is_sm=r1_is_sm, r2_is_sm=r2_is_sm)
def do_forward_enumeration(tasks, r1_is_sm:bool, r2_is_sm:bool):
while True:
try:
args = tasks.get(timeout=1)
except queue.Empty:
break
else:
random_pairs, rxn_templates, task_idx = args
            since = time.time()
print(f'task_idx: {task_idx}')
syn_trees = forward_enumerator(random_pairs,rxn_templates, only_regio_not_problematic=True,\
r1_is_sm=r1_is_sm, r2_is_sm=r2_is_sm)
with open(f'syn_trees_{task_idx}.json', 'w') as fw:
json.dump(syn_trees, fw)
print(f' {task_idx}th task time: {(time.time()-since):.2f}')
return True
def forwardEnumerator(
Rset_1_path:str,
r1_is_sm:bool,
Rset_2_path:str,
r2_is_sm:bool,
numb_of_rand_pairs_per_core:int,
rxn_temp_path:list,
dir_name,
numb_cores):
    '''
    Main entry point: distributes do_forward_enumeration over multiple
    worker processes and joins their JSON outputs.
    '''
with open(Rset_1_path, 'r') as fr:
if r1_is_sm == True:
Rset_1 = fr.read().splitlines()
else:
Rset_1 = json.load(fr)
#Rset_1 = json.load(fr)[40000:80000]
with open(Rset_2_path, 'r') as fr:
if r2_is_sm == True:
Rset_2 = fr.read().splitlines()
else:
Rset_2 = json.load(fr)
#Rset_2 = json.load(fr)[240000:320000]
with open(rxn_temp_path, 'r') as fr:
rxn_templates = json.load(fr)
working_dir = f'{os.getcwd()}/{dir_name}'
    try:
        os.mkdir(working_dir)
    except FileExistsError:
        # Directory already exists: find the next free numbered suffix.
        i = 2
        while True:
            try:
                os.mkdir(f'{working_dir}{i}')
            except FileExistsError:
                i += 1
            else:
                break
        working_dir = f'{working_dir}{i}'
os.chdir(working_dir)
print(f'Current working directory is:\n {working_dir}')
Rset_1_indice = [random.randint(0,len(Rset_1)-1) for i in range(numb_of_rand_pairs_per_core*numb_cores)]
Rset_2_indice = [random.randint(0,len(Rset_2)-1) for i in range(numb_of_rand_pairs_per_core*numb_cores)]
random_pairs = []
for idx, num in enumerate(Rset_1_indice):
to_append = []
if r1_is_sm: to_append.append([Rset_1[num]])
else: to_append.append(Rset_1[num])
if r2_is_sm: to_append.append([Rset_2[Rset_2_indice[idx]]])
else: to_append.append(Rset_2[Rset_2_indice[idx]])
random_pairs.append(to_append)
# multiprocessing of do_forward_enumeration
numb_of_tasks = int(numb_cores)
numb_of_procs = int(numb_cores)
tasks = Queue()
procs = []
since = time.time()
# creating tasks
for task_idx in range(numb_of_tasks):
args = (random_pairs[task_idx*numb_of_rand_pairs_per_core:(task_idx+1)*numb_of_rand_pairs_per_core],\
rxn_templates, task_idx)
tasks.put(args)
# creating processes
for worker in range(numb_of_procs):
p = Process(target = do_forward_enumeration, args = (tasks, r1_is_sm, r2_is_sm))
procs.append(p)
p.start()
time.sleep(0.5)
# completing processes
for p in procs:
p.join()
# join the results
print('-----'*4)
print('Forward enumeration step finished.\n Joining the results...')
syn_trees_list = []
for task_idx in range(numb_of_tasks):
with open(f'syn_trees_{task_idx}.json', 'r') as fr:
syn_trees_list += json.load(fr)
# save the result
time_passed = int(time.time()-since)
now = datetime.now()
finished_at = now.strftime('%Y. %m. %d (%a) %H:%M:%S')
with open('generation_result.json', 'w') as fw:
json.dump(syn_trees_list, fw)
print('-----'*4)
print(f'Forward enumeration finished at:\n {finished_at}')
print(' time passed: [%dh:%dm:%ds]' %(time_passed//3600, (time_passed%3600)//60, time_passed%60))
print('-----'*4)
return True
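# --- Invocation sketch (illustrative; the file paths are hypothetical) ---
# First round: both reactant sets are starting-material SMILES files
# (one SMILES per line), so r1_is_sm=r2_is_sm=True:
#
#   forwardEnumerator(
#       Rset_1_path='building_blocks.smi', r1_is_sm=True,
#       Rset_2_path='building_blocks.smi', r2_is_sm=True,
#       numb_of_rand_pairs_per_core=1000,
#       rxn_temp_path='rxn_templates.json',
#       dir_name='forward_run', numb_cores=4)
#
# Later rounds can feed the saved generation_result.json back in with
# r1_is_sm/r2_is_sm set to False so intermediates are reacted further.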
if __name__ == '__main__':
    # Minimal smoke test. 'CC>>OO' has a single reactant template, so
    # RunReactants must be given exactly one Mol.
    rxn = Rxn('CC>>OO')
    rxn2 = Rxn('CC>>NN')
    mol = Mol('CC')
    mol2 = Mol('CCC')
    print(rxn.RunReactants([mol]))
|
test_threading.py | # expected: fail
# Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose, cpython_only
from test.script_helper import assert_python_ok
import random
import re
import sys
thread = test.test_support.import_module('thread')
threading = test.test_support.import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
try:
import _testcapi
except ImportError:
_testcapi = None
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print 'task %s will run for %.1f usec' % (
self.name, delay * 1e6)
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print 'task', self.name, 'done'
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print '%s is finished. %d tasks are running' % (
self.name, self.nrunning.get())
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.test_support.threading_setup()
def tearDown(self):
test.test_support.threading_cleanup(*self._threads)
test.test_support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
self.skipTest('requires ctypes')
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = thread.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print " verifying worker hasn't exited"
self.assertTrue(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
    def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
self.skipTest('requires ctypes')
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print 'program blocked; aborting'
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
rc = p.returncode
        self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0,
"Unexpected error: " + repr(stderr))
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print "Woke up, sleep function is:", sleep
threading.Thread(target=child).start()
raise SystemExit
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
self.assertEqual(stdout.strip(),
"Woke up, sleep function is: <built-in function sleep>")
        stderr = re.sub(r"^\[\d+ refs\]", "", stderr, flags=re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEqual(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEqual(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, '')
self.assertEqual(err, '')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getcheckinterval()
# Make the bug more likely to manifest.
sys.setcheckinterval(10)
try:
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
finally:
sys.setcheckinterval(old_interval)
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
class ThreadJoinOnShutdown(BaseTestCase):
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'os2emx')
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
\n""" + script
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
p.stdout.close()
self.assertEqual(data, "end of main\nend of thread\n")
self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
def assertScriptHasOutput(self, script, expected_output):
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().decode().replace('\r', '')
self.assertEqual(rc, 0, "Unexpected error")
self.assertEqual(data, expected_output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_joining_across_fork_in_worker_thread(self):
# There used to be a possible deadlock when forking from a child
# thread. See http://bugs.python.org/issue6643.
# The script takes the following steps:
# - The main thread in the parent process starts a new thread and then
# tries to join it.
# - The join operation acquires the Lock inside the thread's _block
# Condition. (See threading.py:Thread.join().)
# - We stub out the acquire method on the condition to force it to wait
# until the child thread forks. (See LOCK ACQUIRED HERE)
# - The child thread forks. (See LOCK HELD and WORKER THREAD FORKS
# HERE)
# - The main thread of the parent process enters Condition.wait(),
# which releases the lock on the child thread.
# - The child process returns. Without the necessary fix, when the
# main thread of the child process (which used to be the child thread
# in the parent process) attempts to exit, it will try to acquire the
# lock in the Thread._block Condition object and hang, because the
# lock was held across the fork.
script = """if 1:
import os, time, threading
finish_join = False
start_fork = False
def worker():
# Wait until this thread's lock is acquired before forking to
# create the deadlock.
global finish_join
while not start_fork:
time.sleep(0.01)
# LOCK HELD: Main thread holds lock across this call.
childpid = os.fork()
finish_join = True
if childpid != 0:
# Parent process just waits for child.
os.waitpid(childpid, 0)
# Child process should just return.
w = threading.Thread(target=worker)
# Stub out the private condition variable's lock acquire method.
# This acquires the lock and then waits until the child has forked
# before returning, which will release the lock soon after. If
# someone else tries to fix this test case by acquiring this lock
# before forking instead of resetting it, the test case will
# deadlock when it shouldn't.
condition = w._block
orig_acquire = condition.acquire
call_count_lock = threading.Lock()
call_count = 0
def my_acquire():
global call_count
global start_fork
orig_acquire() # LOCK ACQUIRED HERE
start_fork = True
if call_count == 0:
while not finish_join:
time.sleep(0.01) # WORKER THREAD FORKS HERE
with call_count_lock:
call_count += 1
condition.acquire = my_acquire
w.start()
w.join()
print('end of main')
"""
self.assertScriptHasOutput(script, "end of main\n")
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_5_clear_waiter_locks_to_avoid_crash(self):
# Check that a spawned thread that forks doesn't segfault on certain
# platforms, namely OS X. This used to happen if there was a waiter
# lock in the thread's condition variable's waiters list. Even though
# we know the lock will be held across the fork, it is not safe to
# release locks held across forks on all platforms, so releasing the
# waiter lock caused a segfault on OS X. Furthermore, since locks on
# OS X are (as of this writing) implemented with a mutex + condition
# variable instead of a semaphore, while we know that the Python-level
# lock will be acquired, we can't know if the internal mutex will be
# acquired at the time of the fork.
script = """if True:
import os, time, threading
start_fork = False
def worker():
# Wait until the main thread has attempted to join this thread
# before continuing.
while not start_fork:
time.sleep(0.01)
childpid = os.fork()
if childpid != 0:
# Parent process just waits for child.
(cpid, rc) = os.waitpid(childpid, 0)
assert cpid == childpid
assert rc == 0
print('end of worker thread')
else:
# Child process should just return.
pass
w = threading.Thread(target=worker)
# Stub out the private condition variable's _release_save method.
# This releases the condition's lock and flips the global that
# causes the worker to fork. At this point, the problematic waiter
# lock has been acquired once by the waiter and has been put onto
# the waiters list.
condition = w._block
orig_release_save = condition._release_save
def my_release_save():
global start_fork
orig_release_save()
# Waiter lock held here, condition lock released.
start_fork = True
condition._release_save = my_release_save
w.start()
w.join()
print('end of main thread')
"""
output = "end of worker thread\nend of main thread\n"
self.assertScriptHasOutput(script, output)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_6_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@cpython_only
@unittest.skipIf(_testcapi is None, "need _testcapi module")
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "genereator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # A Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
@unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error")
self.assertEqual(data, expected_output)
def test_main():
test.test_support.run_unittest(LockTests, RLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
test_main()
|
mp_benchmarks.py | #
# Simple benchmarks for the multiprocessing package
#
import time, sys, multiprocessing, threading, Queue, gc
if sys.platform == 'win32':
_timer = time.clock
else:
_timer = time.time
delta = 1
#### TEST_QUEUESPEED
def queuespeed_func(q, c, iterations):
a = '0' * 256
c.acquire()
c.notify()
c.release()
for i in xrange(iterations):
q.put(a)
q.put('STOP')
def test_queuespeed(Process, q, c):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
p = Process(target=queuespeed_func, args=(q, c, iterations))
c.acquire()
p.start()
c.wait()
c.release()
result = None
t = _timer()
while result != 'STOP':
result = q.get()
elapsed = _timer() - t
p.join()
print iterations, 'objects passed through the queue in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_PIPESPEED
def pipe_func(c, cond, iterations):
a = '0' * 256
cond.acquire()
cond.notify()
cond.release()
for i in xrange(iterations):
c.send(a)
c.send('STOP')
def test_pipespeed():
c, d = multiprocessing.Pipe()
cond = multiprocessing.Condition()
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
p = multiprocessing.Process(target=pipe_func,
args=(d, cond, iterations))
cond.acquire()
p.start()
cond.wait()
cond.release()
result = None
t = _timer()
while result != 'STOP':
result = c.recv()
elapsed = _timer() - t
p.join()
    print iterations, 'objects passed through connection in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_SEQSPEED
def test_seqspeed(seq):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
for i in xrange(iterations):
a = seq[5]
elapsed = _timer()-t
print iterations, 'iterations in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_LOCK
def test_lockspeed(l):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
for i in xrange(iterations):
l.acquire()
l.release()
elapsed = _timer()-t
print iterations, 'iterations in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_CONDITION
def conditionspeed_func(c, N):
c.acquire()
c.notify()
for i in xrange(N):
c.wait()
c.notify()
c.release()
def test_conditionspeed(Process, c):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
c.acquire()
p = Process(target=conditionspeed_func, args=(c, iterations))
p.start()
c.wait()
t = _timer()
for i in xrange(iterations):
c.notify()
c.wait()
elapsed = _timer()-t
c.release()
p.join()
print iterations * 2, 'waits in', elapsed, 'seconds'
print 'average number/sec:', iterations * 2 / elapsed
####
def test():
manager = multiprocessing.Manager()
gc.disable()
print '\n\t######## testing Queue.Queue\n'
test_queuespeed(threading.Thread, Queue.Queue(),
threading.Condition())
print '\n\t######## testing multiprocessing.Queue\n'
test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
multiprocessing.Condition())
print '\n\t######## testing Queue managed by server process\n'
test_queuespeed(multiprocessing.Process, manager.Queue(),
manager.Condition())
print '\n\t######## testing multiprocessing.Pipe\n'
test_pipespeed()
print
print '\n\t######## testing list\n'
test_seqspeed(range(10))
print '\n\t######## testing list managed by server process\n'
test_seqspeed(manager.list(range(10)))
print '\n\t######## testing Array("i", ..., lock=False)\n'
test_seqspeed(multiprocessing.Array('i', range(10), lock=False))
print '\n\t######## testing Array("i", ..., lock=True)\n'
test_seqspeed(multiprocessing.Array('i', range(10), lock=True))
print
print '\n\t######## testing threading.Lock\n'
test_lockspeed(threading.Lock())
print '\n\t######## testing threading.RLock\n'
test_lockspeed(threading.RLock())
print '\n\t######## testing multiprocessing.Lock\n'
test_lockspeed(multiprocessing.Lock())
print '\n\t######## testing multiprocessing.RLock\n'
test_lockspeed(multiprocessing.RLock())
print '\n\t######## testing lock managed by server process\n'
test_lockspeed(manager.Lock())
print '\n\t######## testing rlock managed by server process\n'
test_lockspeed(manager.RLock())
print
print '\n\t######## testing threading.Condition\n'
test_conditionspeed(threading.Thread, threading.Condition())
print '\n\t######## testing multiprocessing.Condition\n'
test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
print '\n\t######## testing condition managed by a server process\n'
test_conditionspeed(multiprocessing.Process, manager.Condition())
gc.enable()
if __name__ == '__main__':
multiprocessing.freeze_support()
test()
|
_testing.py | import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
import pandas._libs.testing as _testing
from pandas._typing import FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = _import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
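# Usage sketch (illustrative): round-trip a Series and check it survives
# pickling unchanged (assert_series_equal is defined later in this module).
def _round_trip_pickle_example():
    ser = pd.Series([1, 2, 3], name="x")
    result = round_trip_pickle(ser)
    assert_series_equal(ser, result)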
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
f = gzip.open(path, "rb")
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = _get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
import zipfile
compress_method = zipfile.ZipFile
elif compression == "gzip":
import gzip
compress_method = gzip.GzipFile
elif compression == "bz2":
import bz2
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = _get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
args = (data,)
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
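# Usage sketch (illustrative): write_to_compressed pairs naturally with
# decompress_file for a compression round trip.
def _compression_round_trip_example():
    with ensure_clean("__example__.gz") as path:
        write_to_compressed("gzip", path, b"some data")
        with decompress_file(path, "gzip") as f:
            assert f.read() == b"some data"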
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = False,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
        Check dtype if both left and right are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left,
right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return "".join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
    Get a temporary file path and ensure the file is removed when the context exits.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
        :meth:`tempfile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
    Get a temporary directory path and ensure the directory is removed when the context exits.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
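# Usage sketch (illustrative): changes made to os.environ inside the block
# do not leak out of it.
def _safe_environ_example():
    with ensure_safe_environment_variables():
        os.environ["PANDAS_TESTING_MODE"] = "deprecate"
        # ... code that depends on the variable ...
    # os.environ is restored to its previous state here.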
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
    Check whether the sets of unique elements of arr1 and arr2 are equal.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = False,
check_exact: bool = True,
check_categorical: bool = True,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ("string"):
assert r.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values, obj=f"{obj} category")
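# Usage sketch (illustrative): with the default exact="equiv", a RangeIndex
# and an Int64Index holding the same values are considered equal.
def _index_equal_example():
    assert_index_equal(pd.RangeIndex(3), pd.Index([0, 1, 2], dtype="int64"))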
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return type(x).__name__
except AttributeError:
return repr(type(x))
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr, left, right, obj="Attributes"):
"""
    Check that attributes are equal. Both objects must have the attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes",
)
else:
assert_index_equal(
left.categories.sort_values(),
right.categories.sort_values(),
obj=f"{obj}.categories",
)
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.right")
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}.values")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg = f"""{obj} are different
{message}
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
):
"""
Check that two 'np.ndarray' objects are equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both left and right are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape,
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left, right, check_dtype=True, check_less_precise=False, check_exact=False
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if hasattr(left, "asi8") and type(right) == type(left):
# Avoid slow object-dtype comparisons
assert_numpy_array_equal(left.asi8, right.asi8)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask")
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray")
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj="ExtensionArray",
)
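# A minimal sketch for assert_extension_array_equal using the nullable Int64
# extension dtype (assumed available, pandas >= 0.24): the NA masks are
# compared first, then the remaining valid values.
def _example_assert_extension_array_equal():
    left = pd.array([1, 2, None], dtype="Int64")
    right = pd.array([1, 2, None], dtype="Int64")
    assert_extension_array_equal(left, right)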
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals
.. versionadded:: 1.0.2
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.index",
)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left)
and is_categorical_dtype(right)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact:
assert_numpy_array_equal(
left._internal_get_values(),
right._internal_get_values(),
check_dtype=check_dtype,
obj=str(obj),
)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if needs_i8_conversion(left) or needs_i8_conversion(right):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = (
f"[datetimelike_compat=True] {left.values} "
f"is not equal to {right.values}."
)
raise AssertionError(msg)
else:
assert_numpy_array_equal(
left._internal_get_values(),
right._internal_get_values(),
check_dtype=check_dtype,
)
elif is_interval_dtype(left) or is_interval_dtype(right):
assert_interval_array_equal(left.array, right.array)
elif is_extension_array_dtype(left.dtype) and is_datetime64tz_dtype(left.dtype):
# .values is an ndarray, but ._values is the ExtensionArray.
# TODO: Use .array
assert is_extension_array_dtype(right.dtype)
assert_extension_array_equal(left._values, right._values)
elif (
is_extension_array_dtype(left)
and not is_categorical_dtype(left)
and is_extension_array_dtype(right)
and not is_categorical_dtype(right)
):
assert_extension_array_equal(left.array, right.array)
else:
_testing.assert_almost_equal(
left._internal_get_values(),
right._internal_get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj=str(obj),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(
left.values,
right.values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
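# A sketch of relaxing the dtype check: the two Series carry equal values as
# int64 and float64 respectively, which only passes with check_dtype=False.
def _example_assert_series_equal_dtype():
    left = Series([1, 2, 3])
    right = Series([1.0, 2.0, 3.0])
    assert_series_equal(left, right, check_dtype=False)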
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
)
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.columns",
)
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
)
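# A sketch of check_like: with check_like=True, row and column order is
# ignored as long as each label carries the same data (the right frame is
# the left one with rows and columns reordered).
def _example_assert_frame_equal_check_like():
    left = DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])
    right = left.loc[["y", "x"], ["b", "a"]]  # same data, reordered labels
    assert_frame_equal(left, right, check_like=True)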
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
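# assert_equal dispatches on the type of `left`; a sketch exercising a few
# of its branches.
def _example_assert_equal_dispatch():
    assert_equal(pd.Index([1, 2]), pd.Index([1, 2]))       # assert_index_equal
    assert_equal(Series([1.0]), Series([1.0]))             # assert_series_equal
    assert_equal(np.array([1, 2]), np.array([1, 2]))       # assert_numpy_array_equal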
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_cls.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
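# A sketch of the DataFrame branch of box_expected: with the default
# transpose=True, the boxed result is a single row, suitable for operating
# against same-length non-DataFrame vectors.
def _example_box_expected():
    boxed = box_expected([1, 2, 3], pd.DataFrame)
    assert boxed.shape == (1, 3)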
def to_array(obj):
# temporary implementation until we get pd.array in place
if is_period_dtype(obj):
return period_array(obj)
elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(obj):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(
left,
right,
check_dtype=True,
check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
check_kind : bool, default True
Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not check_kind:
left_index = left.sp_index.to_block_index()
right_index = right.sp_index.to_block_index()
else:
left_index = left.sp_index
right_index = right.sp_index
if consolidate_block_indices and left.kind == "block":
# we'll probably remove this hack...
left_index = left_index.to_int_index().to_block_index()
right_index = right_index.to_int_index().to_block_index()
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
# indices are equal; nothing further to check
pass
if check_fill_value:
assert_attr_equal("fill_value", left, right)
if check_dtype:
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype)
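# A minimal sketch for assert_sp_array_equal, assuming pd.arrays.SparseArray
# is available: identical dense data yields identical sparse values, sparse
# index, fill_value and dtype.
def _example_assert_sp_array_equal():
    left = pd.arrays.SparseArray([0, 1, 0, 2])
    right = pd.arrays.SparseArray([0, 1, 0, 2])
    assert_sp_array_equal(left, right)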
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
Returns
-------
DataFrame
A DataFrame with a DatetimeIndex named "timestamp" and columns:
* name : object dtype with string names
* id : int dtype with Poisson-distributed values
* x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def all_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [
makeIntIndex,
makeFloatIndex,
makeStringIndex,
makeUnicodeIndex,
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeBoolIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
Create an index/MultiIndex with given dimensions, levels, names, etc.
nentries - number of entries in index
nlevels - number of levels (> 1 produces a MultiIndex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. If True, will use default
names; if False, will use no names; if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will be repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index,
"dt" creates a datetime index,
"p" creates a period index,
"td" creates a timedelta index.
If unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) is nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
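# A sketch of makeCustomIndex: four entries over two levels, with the first
# level repeating each label twice (ndupe_l=[2]) and default ("#0", "#1")
# level names.
def _example_makeCustomIndex():
    idx = makeCustomIndex(nentries=4, nlevels=2, names=True, ndupe_l=[2])
    assert idx.nlevels == 2 and len(idx) == 4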
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels == 1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index,
"dt" creates a datetime index,
"p" creates a period index,
"td" creates a timedelta index.
If unspecified, string labels will be generated.
Examples
--------
# 5 rows, 3 columns, default names on both, single index on both axes
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level MultiIndex on rows with each label duplicated
# twice on first level, default names on both axes, single
# index on columns
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on rows, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level MultiIndex on rows with names provided, 2-level MultiIndex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(
nrows,
ncols,
density=0.9,
random_state=None,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Parameters
----------
density : float, optional
Float in (0, 1) that gives the percentage of non-missing numbers in
the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(
nrows,
ncols,
c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l,
r_ndupe_l=r_ndupe_l,
dtype=dtype,
c_idx_type=c_idx_type,
r_idx_type=r_idx_type,
)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
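# A sketch of makeMissingDataframe: with the default density=0.9, roughly
# 10% of the cells are replaced with NaN; a fixed random_state makes the
# missing pattern reproducible.
def _example_makeMissingDataframe():
    df = makeMissingDataframe(density=0.9, random_state=42)
    assert df.isna().values.any()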
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
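# A sketch of optional_args: the decorated decorator may be applied bare or
# with keyword arguments. `announce` and `prefix` are illustrative names.
def _example_optional_args():
    @optional_args
    def announce(f, prefix="call"):
        @wraps(f)
        def inner(*args, **kwargs):
            print(f"{prefix}: {f.__name__}")
            return f(*args, **kwargs)
        return inner

    @announce  # bare decoration -> decorator(f)
    def one():
        return 1

    @announce(prefix="timed")  # with kwargs -> decorator(f, prefix="timed")
    def two():
        return 2

    return one(), two()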
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : str
The URL to try to connect to
error_classes : tuple of Exception, optional
The exception classes that count as a failed connection; defaults to
the classes returned by _get_default_network_errors().
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback (most recent call last):
...
URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
if not errno and hasattr(err, "reason"):
errno = getattr(err.reason, "errno", None)
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
clear=None,
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of warning to check for. ``Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
clear : str, default None
If not ``None`` then remove any previously raised warnings from
the ``__warningsregistry__`` to ensure that no warning messages are
suppressed by this context manager. If ``None`` is specified,
the ``__warningsregistry__`` keeps track of which warnings have been
shown, and does not show them again.
check_stacklevel : bool, default True
If True, displays the line that called the function containing
the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
.. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
if clear is not None:
# make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except AttributeError:
# module may not have __warningregistry__
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(
actual_warning.category, expected_warning
):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = (
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
class RNGContext:
"""
Context manager to set the numpy random number generator seed. Restores
the original state upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
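# A sketch of with_csv_dialect: register a throwaway dialect, parse with it,
# and let the context manager unregister it on exit. The dialect name
# "pipes" is illustrative.
def _example_with_csv_dialect():
    import csv
    from io import StringIO

    with with_csv_dialect("pipes", delimiter="|"):
        rows = list(csv.reader(StringIO("a|b|c"), dialect="pipes"))
    assert rows == [["a", "b", "c"]]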
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
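# A sketch of test_parallel: the same function body runs once per thread;
# return values are discarded, so results are collected via a shared list
# (list.append is atomic under the GIL).
def _example_test_parallel():
    results = []

    @test_parallel(num_threads=4)
    def append_one():
        results.append(1)

    append_one()
    assert len(results) == 4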
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This function is used to create the expected value of the to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
expected = sep.join(rows_list) + sep
return expected
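# A sketch of convert_rows_list_to_csv_str: rows are joined with the OS line
# separator, plus a trailing separator, to match to_csv() output on the
# current platform.
def _example_convert_rows_list_to_csv_str():
    expected = convert_rows_list_to_csv_str(["a,b", "1,2"])
    assert expected == "a,b" + os.linesep + "1,2" + os.linesep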
def external_error_raised(
expected_exception: Type[Exception],
) -> Callable[[Type[Exception], None], None]:
"""
Helper function to wrap pytest.raises for exceptions whose error message is external.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
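# A sketch of external_error_raised: use it when the message text originates
# outside pandas (e.g. a third-party library) and should not be matched.
def _example_external_error_raised():
    with external_error_raised(ValueError):
        raise ValueError("message produced by a third-party library")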
|
rpdb2.py | #! /usr/bin/env python
"""
rpdb2.py - version 2.4.8
A remote Python debugger for CPython
Copyright (C) 2005-2009 Nir Aides
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or any later
version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02111-1307 USA
"""
COPYRIGHT_NOTICE = """Copyright (C) 2005-2009 Nir Aides"""
CREDITS_NOTICE = """Work on version 2.4.8 was sponsored by Investortools, Inc."""
LICENSE_NOTICE = """
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or any later
version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
A copy of the GPL with the precise terms and conditions for
copying, distribution and modification follow:
"""
COPY_OF_THE_GPL_LICENSE = """
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0.
This License applies to any program or other work which contains a notice
placed by the copyright holder saying it may be distributed under the terms
of this General Public License. The "Program", below, refers to any such
program or work, and a "work based on the Program" means either the Program
or any derivative work under copyright law: that is to say, a work containing
the Program or a portion of it, either verbatim or with modifications and/or
translated into another language. (Hereinafter, translation is included
without limitation in the term "modification".) Each licensee is addressed
as "you".
Activities other than copying, distribution and modification are not covered
by this License; they are outside its scope. The act of running the Program
is not restricted, and the output from the Program is covered only if its
contents constitute a work based on the Program (independent of having been
made by running the Program). Whether that is true depends on what the
Program does.
1.
You may copy and distribute verbatim copies of the Program's source code as
you receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice and
disclaimer of warranty; keep intact all the notices that refer to this
License and to the absence of any warranty; and give any other recipients of
the Program a copy of this License along with the Program.
You may charge a fee for the physical act of transferring a copy, and you
may at your option offer warranty protection in exchange for a fee.
2.
You may modify your copy or copies of the Program or any portion of it, thus
forming a work based on the Program, and copy and distribute such modifications
or work under the terms of Section 1 above, provided that you also meet all
of these conditions:
a) You must cause the modified files to carry prominent notices stating
that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in whole
or in part contains or is derived from the Program or any part thereof,
to be licensed as a whole at no charge to all third parties under the
terms of this License.
c) If the modified program normally reads commands interactively when
run, you must cause it, when started running for such interactive use in
the most ordinary way, to print or display an announcement including an
appropriate copyright notice and a notice that there is no warranty (or
else, saying that you provide a warranty) and that users may redistribute
the program under these conditions, and telling the user how to view a
copy of this License. (Exception: if the Program itself is interactive
but does not normally print such an announcement, your work based on the
Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If identifiable
sections of that work are not derived from the Program, and can be reasonably
considered independent and separate works in themselves, then this License,
and its terms, do not apply to those sections when you distribute them as
separate works. But when you distribute the same sections as part of a whole
which is a work based on the Program, the distribution of the whole must be
on the terms of this License, whose permissions for other licensees extend to
the entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest your
rights to work written entirely by you; rather, the intent is to exercise the
right to control the distribution of derivative or collective works based on
the Program.
In addition, mere aggregation of another work not based on the Program with
the Program (or with a work based on the Program) on a volume of a storage or
distribution medium does not bring the other work under the scope of this
License.
3. You may copy and distribute the Program (or a work based on it, under
Section 2) in object code or executable form under the terms of Sections 1
and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable source
code, which must be distributed under the terms of Sections 1 and 2 above
on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three years, to
give any third party, for a charge no more than your cost of physically
performing source distribution, a complete machine-readable copy of the
corresponding source code, to be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer to
distribute corresponding source code. (This alternative is allowed only
for noncommercial distribution and only if you received the program in
object code or executable form with such an offer, in accord with
Subsection b above.)
The source code for a work means the preferred form of the work for making
modifications to it. For an executable work, complete source code means all
the source code for all modules it contains, plus any associated interface
definition files, plus the scripts used to control compilation and
installation of the executable. However, as a special exception, the source
code distributed need not include anything that is normally distributed (in
either source or binary form) with the major components (compiler, kernel,
and so on) of the operating system on which the executable runs, unless that
component itself accompanies the executable.
If distribution of executable or object code is made by offering access to
copy from a designated place, then offering equivalent access to copy the
source code from the same place counts as distribution of the source code,
even though third parties are not compelled to copy the source along with
the object code.
4. You may not copy, modify, sublicense, or distribute the Program except as
expressly provided under this License. Any attempt otherwise to copy, modify,
sublicense or distribute the Program is void, and will automatically
terminate your rights under this License. However, parties who have received
copies, or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
5. You are not required to accept this License, since you have not signed it.
However, nothing else grants you permission to modify or distribute the
Program or its derivative works. These actions are prohibited by law if you
do not accept this License. Therefore, by modifying or distributing the
Program (or any work based on the Program), you indicate your acceptance of
this License to do so, and all its terms and conditions for copying,
distributing or modifying the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the Program),
the recipient automatically receives a license from the original licensor to
copy, distribute or modify the Program subject to these terms and conditions.
You may not impose any further restrictions on the recipients' exercise of
the rights granted herein. You are not responsible for enforcing compliance
by third parties to this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or otherwise)
that contradict the conditions of this License, they do not excuse you from
the conditions of this License. If you cannot distribute so as to satisfy
simultaneously your obligations under this License and any other pertinent
obligations, then as a consequence you may not distribute the Program at all.
For example, if a patent license would not permit royalty-free redistribution
of the Program by all those who receive copies directly or indirectly through
you, then the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply and
the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any patents
or other property right claims or to contest validity of any such claims;
this section has the sole purpose of protecting the integrity of the free
software distribution system, which is implemented by public license
practices. Many people have made generous contributions to the wide range of
software distributed through that system in reliance on consistent
application of that system; it is up to the author/donor to decide if he or
she is willing to distribute software through any other system and a licensee
cannot impose that choice.
This section is intended to make thoroughly clear what is believed to be a
consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in certain
countries either by patents or by copyrighted interfaces, the original
copyright holder who places the Program under this License may add an
explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions of
the General Public License from time to time. Such new versions will be
similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by the
Free Software Foundation. If the Program does not specify a version number
of this License, you may choose any version ever published by the
Free Software Foundation.
10. If you wish to incorporate parts of the Program into other free programs
whose distribution conditions are different, write to the author to ask for
permission. For software which is copyrighted by the Free Software
Foundation, write to the Free Software Foundation; we sometimes make
exceptions for this. Our decision will be guided by the two goals of
preserving the free status of all derivatives of our free software and of
promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE
STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE
PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE,
YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO
LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR
THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
"""
if '.' in __name__:
raise ImportError('rpdb2 must not be imported as part of a package!')
import subprocess
import threading
import traceback
import zipimport
import tempfile
import __main__
import platform
import operator
import weakref
import os.path
import zipfile
import pickle
import socket
import getopt
import string
import random
import base64
import atexit
import locale
import codecs
import signal
import errno
import time
import copy
import hmac
import stat
import zlib
import sys
import cmd
import imp
import os
import re
try:
import hashlib
_md5 = hashlib.md5
except:
import md5
_md5 = md5
try:
import compiler
import sets
except:
pass
try:
import popen2
except:
pass
try:
from Crypto.Cipher import DES
except ImportError:
pass
#
# Pre-Import needed by my_abspath1
#
try:
from nt import _getfullpathname
except ImportError:
pass
try:
import SimpleXMLRPCServer
import xmlrpclib
import SocketServer
import commands
import copy_reg
import httplib
import thread
except:
#
# The above modules were renamed in Python 3 so try to import them 'as'
#
import xmlrpc.server as SimpleXMLRPCServer
import xmlrpc.client as xmlrpclib
import socketserver as SocketServer
import subprocess as commands
import copyreg as copy_reg
import http.client as httplib
import _thread as thread
#
# Needed in py3k path.
#
import numbers
#
#-------------------------------- Design Notes -------------------------------
#
"""
Design:
RPDB2 divides the world into two main parts: debugger and debuggee.
The debuggee is the script that needs to be debugged.
The debugger is another script that attaches to the debuggee for the
purpose of debugging.
Thus RPDB2 includes two main components: The debuggee-server that runs
in the debuggee and the session-manager that runs in the debugger.
The session manager and the debuggee-server communicate via XML-RPC.
The main classes are: CSessionManager and CDebuggeeServer
"""
#
#--------------------------------- Export functions ------------------------
#
TIMEOUT_FIVE_MINUTES = 5 * 60.0
def start_embedded_debugger(
_rpdb2_pwd,
fAllowUnencrypted = True,
fAllowRemote = False,
timeout = TIMEOUT_FIVE_MINUTES,
source_provider = None,
fDebug = False,
depth = 0
):
"""
Use 'start_embedded_debugger' to invoke the debugger engine in embedded
scripts. Put the following line as the first line in your script:
import rpdb2; rpdb2.start_embedded_debugger(<some-password-string>)
This will cause the script to freeze until a debugger console attaches.
_rpdb2_pwd - The password that governs security of client/server communication.
fAllowUnencrypted - Allow unencrypted communications. Communication will
be authenticated but encrypted only if possible.
fAllowRemote - Allow debugger consoles from remote machines to connect.
timeout - Seconds to wait for attachment before giving up. Once the
timeout period expires, the debuggee will resume execution.
If None, never give up. If 0, do not wait at all.
source_provider - When script source is not available on file system it is
possible to specify a function that receives a "filename" and returns
its source. If filename specifies a file that does not fall under
the jurisdiction of this function it should raise IOError. If this
function is responsible for the specified file but the source is
not available it should raise IOError(SOURCE_NOT_AVAILABLE). You can
study the way source_provider_blender() works. Note that a misbehaving
function can break the debugger.
fDebug - debug output.
depth - Depth of the frame in which the debugger should be started. This
defaults to '0' so the top of stack will be in the code where
start_embedded_debugger is called.
IMPORTANT SECURITY NOTE:
USING A HARDCODED PASSWORD MAY BE INSECURE SINCE ANYONE WITH READ
PERMISSION TO THE SCRIPT WILL BE ABLE TO READ THE PASSWORD AND CONNECT TO
THE DEBUGGER AND DO WHATEVER THEY WISH VIA THE 'EXEC' DEBUGGER COMMAND.
It is safer to use: start_embedded_debugger_interactive_password()
"""
return __start_embedded_debugger(
_rpdb2_pwd,
fAllowUnencrypted,
fAllowRemote,
timeout,
source_provider,
fDebug,
depth + 2
)
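#
# Illustrative sketch (not part of the original rpdb2 source): the typical
# embedded-debugger pattern described in the docstring above. The password
# and timeout below are hypothetical values.
#
#     import rpdb2
#
#     # Freeze here until a debugger console attaches, or resume automatically
#     # after 60 seconds if none does.
#     rpdb2.start_embedded_debugger('some-password', timeout = 60)
#
#     main()    # the rest of the script runs under the debugger once attached
#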
def start_embedded_debugger_interactive_password(
fAllowUnencrypted = True,
fAllowRemote = False,
timeout = TIMEOUT_FIVE_MINUTES,
source_provider = None,
fDebug = False,
stdin = sys.stdin,
stdout = sys.stdout,
depth = 0
):
if g_server is not None:
return
while True:
if stdout is not None:
stdout.write('Please type password:')
_rpdb2_pwd = stdin.readline().rstrip('\n')
_rpdb2_pwd = as_unicode(_rpdb2_pwd, detect_encoding(stdin), fstrict = True)
try:
return __start_embedded_debugger(
_rpdb2_pwd,
fAllowUnencrypted,
fAllowRemote,
timeout,
source_provider,
fDebug,
depth + 2
)
except BadArgument:
stdout.write(STR_PASSWORD_BAD)
def settrace():
"""
Trace threads that were created with thread.start_new_thread()
To trace, call this function from the thread target function.
NOTE: The main thread and any threads created with the threading module
are automatically traced, and there is no need to invoke this function
for them.
Note: This call does not pause the script.
"""
return __settrace()
def setbreak(depth = 0):
"""
Pause the script for inspection at next script statement.
"""
return __setbreak(depth + 2)
def set_temp_breakpoint(path, scopename = '', lineno = 1):
"""
Set a temporary breakpoint in a file. path must be an absolute path.
scopename can either be an empty string or a fully qualified scope name
(For example u'g_debugger.m_bp_manager.set_temp_breakpoint'). lineno is
either relative to file start or to scope start.
To set a temporary breakpoint to hit when a file is first
imported or executed, call set_temp_breakpoint(path).
This function may throw a variety of exceptions.
"""
path = as_unicode(path, fstrict = True)
scopename = as_unicode(scopename, fstrict = True)
return __set_temp_breakpoint(path, scopename, lineno)
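#
# Illustrative sketch (not part of the original rpdb2 source): pausing a
# running script and arming a one-shot breakpoint. The path and scope below
# are hypothetical; set_temp_breakpoint() requires an absolute path.
#
#     import rpdb2
#
#     rpdb2.setbreak()    # pause at the next statement for inspection
#
#     # Break once when /home/user/project/worker.py is first imported or run,
#     # or 42 lines into the scope Worker.run inside that file.
#     rpdb2.set_temp_breakpoint('/home/user/project/worker.py')
#     rpdb2.set_temp_breakpoint('/home/user/project/worker.py', u'Worker.run', 42)
#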
#
#----------------------------------- Interfaces ------------------------------
#
VERSION = (2, 4, 8, 0, 'Tychod')
RPDB_TITLE = "RPDB 2.4.8 - Tychod"
RPDB_VERSION = "RPDB_2_4_8"
RPDB_COMPATIBILITY_VERSION = "RPDB_2_4_0"
def get_version():
return RPDB_VERSION
def get_interface_compatibility_version():
return RPDB_COMPATIBILITY_VERSION
class CSimpleSessionManager:
"""
This is a wrapper class that simplifies launching and controlling of a
debuggee from within another program. For example, an IDE that launches
a script for debugging purposes can use this class to launch, debug and
stop a script.
"""
def __init__(self, fAllowUnencrypted = True):
self.__sm = CSessionManagerInternal(
_rpdb2_pwd = None,
fAllowUnencrypted = fAllowUnencrypted,
fAllowRemote = False,
host = LOCALHOST
)
self.m_fRunning = False
event_type_dict = {CEventUnhandledException: {}}
self.__sm.register_callback(self.__unhandled_exception, event_type_dict, fSingleUse = False)
event_type_dict = {CEventState: {}}
self.__sm.register_callback(self.__state_calback, event_type_dict, fSingleUse = False)
event_type_dict = {CEventExit: {}}
self.__sm.register_callback(self.__termination_callback, event_type_dict, fSingleUse = False)
def shutdown(self):
self.__sm.shutdown()
def launch(self, fchdir, command_line, encoding = 'utf-8', fload_breakpoints = False):
command_line = as_unicode(command_line, encoding, fstrict = True)
self.m_fRunning = False
self.__sm.launch(fchdir, command_line, fload_breakpoints)
def request_go(self):
self.__sm.request_go()
def detach(self):
self.__sm.detach()
def stop_debuggee(self):
self.__sm.stop_debuggee()
def get_session_manager(self):
return self.__sm
def prepare_attach(self):
"""
Use this method to attach a debugger to the debuggee after an
exception is caught.
"""
_rpdb2_pwd = self.__sm.get_password()
si = self.__sm.get_server_info()
rid = si.m_rid
if os.name == 'posix':
#
# On posix systems the password is set at the debuggee via
# a special temporary file.
#
create_pwd_file(rid, _rpdb2_pwd)
_rpdb2_pwd = None
return (rid, _rpdb2_pwd)
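# Illustrative sketch (not part of the original rpdb2 source): using the
# values returned by prepare_attach(). The variable names are hypothetical.
#
#     rid, pwd = ssm.prepare_attach()
#     # On POSIX 'pwd' is None because the password was written to a temporary
#     # file keyed by 'rid'; a debugger console started by the same user can
#     # now attach to the debuggee using 'rid' as the key.
#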
#
# Override these callbacks to react to the related events.
#
def unhandled_exception_callback(self):
_print('unhandled_exception_callback')
self.request_go()
def script_paused(self):
_print('script_paused')
self.request_go()
def script_terminated_callback(self):
_print('script_terminated_callback')
#
# Private Methods
#
def __unhandled_exception(self, event):
self.unhandled_exception_callback()
def __termination_callback(self, event):
self.script_terminated_callback()
def __state_calback(self, event):
"""
Handle state change notifications from the debuggee.
"""
if event.m_state != STATE_BROKEN:
return
if not self.m_fRunning:
#
# First break comes immediately after launch.
#
print_debug('Simple session manager continues on first break.')
self.m_fRunning = True
self.request_go()
return
if self.__sm.is_unhandled_exception():
return
sl = self.__sm.get_stack(tid_list = [], fAll = False)
if len(sl) == 0:
self.request_go()
return
st = sl[0]
s = st.get(DICT_KEY_STACK, [])
if len(s) == 0:
self.request_go()
return
e = s[-1]
function_name = e[2]
filename = os.path.basename(e[0])
if filename != DEBUGGER_FILENAME:
#
# This is a user breakpoint (e.g. rpdb2.setbreak())
#
self.script_paused()
return
#
# This is the setbreak() before a fork, exec or program
# termination.
#
self.request_go()
return
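#
# Illustrative sketch (not part of the original rpdb2 source): driving a
# debuggee from an IDE-like host with CSimpleSessionManager. The script path
# and arguments are hypothetical; command_line is passed as a single string.
#
#     class MyRunner(CSimpleSessionManager):
#         def unhandled_exception_callback(self):
#             _print('exception in debuggee')
#             self.request_go()
#
#         def script_terminated_callback(self):
#             _print('debuggee finished')
#
#     runner = MyRunner(fAllowUnencrypted = True)
#     runner.launch(fchdir = False, command_line = '/path/to/script.py arg1')
#     # ... interact via runner.get_session_manager() ...
#     runner.shutdown()
#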
class CSessionManager:
"""
Interface to the session manager.
This is the interface through which the debugger controls and
communicates with the debuggee.
Accepted strings are either utf-8 or Unicode unless specified otherwise.
Returned strings are Unicode (also when embedded in data structures).
You can study the way it is used in StartClient()
"""
def __init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host):
if _rpdb2_pwd != None:
assert(is_valid_pwd(_rpdb2_pwd))
_rpdb2_pwd = as_unicode(_rpdb2_pwd, fstrict = True)
self.__smi = CSessionManagerInternal(
_rpdb2_pwd,
fAllowUnencrypted,
fAllowRemote,
host
)
def shutdown(self):
return self.__smi.shutdown()
def set_printer(self, printer):
"""
'printer' is a function that takes one argument and prints it.
You can study CConsoleInternal.printer() as an example of use
and rationale.
"""
return self.__smi.set_printer(printer)
def report_exception(self, type, value, tb):
"""
Sends exception information to the printer.
"""
return self.__smi.report_exception(type, value, tb)
def register_callback(self, callback, event_type_dict, fSingleUse):
"""
Receive events from the session manager.
The session manager communicates its state mainly by firing events.
You can study CConsoleInternal.__init__() as an example of use.
For details see CEventDispatcher.register_callback()
"""
return self.__smi.register_callback(
callback,
event_type_dict,
fSingleUse
)
def remove_callback(self, callback):
return self.__smi.remove_callback(callback)
def refresh(self):
"""
Fire again all relevant events needed to establish the current state.
"""
return self.__smi.refresh()
def launch(self, fchdir, command_line, encoding = 'utf-8', fload_breakpoints = True):
"""
Launch debuggee in a new process and attach.
fchdir - Change current directory to that of the debuggee.
command_line - command line arguments passed to the script, as a string.
fload_breakpoints - Load breakpoints of last session.
if command line is not a unicode string it will be decoded into unicode
with the given encoding
"""
command_line = as_unicode(command_line, encoding, fstrict = True)
return self.__smi.launch(fchdir, command_line, fload_breakpoints)
def restart(self):
"""
Restart debug session with same command_line and fchdir arguments
which were used in last launch.
"""
return self.__smi.restart()
def get_launch_args(self):
"""
Return command_line and fchdir arguments which were used in last
launch as (last_fchdir, last_command_line).
Returns (None, None) if there is no info.
"""
return self.__smi.get_launch_args()
def attach(self, key, name = None, encoding = 'utf-8'):
"""
Attach to a debuggee (establish communication with the debuggee-server)
key - a string specifying part of the filename or PID of the debuggee.
if key is not a unicode string it will be decoded into unicode
with the given encoding
"""
key = as_unicode(key, encoding, fstrict = True)
return self.__smi.attach(key, name)
def detach(self):
"""
Let the debuggee go...
"""
return self.__smi.detach()
def request_break(self):
return self.__smi.request_break()
def request_go(self):
return self.__smi.request_go()
def request_go_breakpoint(self, filename, scope, lineno):
"""
Go (run) until the specified location is reached.
"""
filename = as_unicode(filename, fstrict = True)
scope = as_unicode(scope, fstrict = True)
return self.__smi.request_go_breakpoint(filename, scope, lineno)
def request_step(self):
"""
Go until the next line of code is reached.
"""
return self.__smi.request_step()
def request_next(self):
"""
Go until the next line of code in the same scope is reached.
"""
return self.__smi.request_next()
def request_return(self):
"""
Go until end of scope is reached.
"""
return self.__smi.request_return()
def request_jump(self, lineno):
"""
Jump to the specified line number in the same scope.
"""
return self.__smi.request_jump(lineno)
#
# REVIEW: should return breakpoint ID
#
def set_breakpoint(self, filename, scope, lineno, fEnabled, expr):
"""
Set a breakpoint.
filename - (Optional) can be either a file name or a module name,
full path, relative path or no path at all.
If filename is None or '', then the current module is
used.
scope - (Optional) Specifies a dot delimited scope for the
breakpoint, such as: foo or myClass.foo
lineno - (Optional) Specify a line within the selected file or
if a scope is specified, a zero-based offset from the
start of the scope.
expr - (Optional) A Python expression that will be evaluated
locally when the breakpoint is hit. The break will
occur only if the expression evaluates to true.
"""
filename = as_unicode(filename, fstrict = True)
scope = as_unicode(scope, fstrict = True)
expr = as_unicode(expr, fstrict = True)
return self.__smi.set_breakpoint(
filename,
scope,
lineno,
fEnabled,
expr
)
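# Illustrative sketch (not part of the original rpdb2 source): conditional
# breakpoints set through the session manager, following the parameter
# description above. The file name, scope, line numbers and expression are
# hypothetical.
#
#     sm.set_breakpoint('worker.py', '', 120, True, 'retries > 3')
#     sm.set_breakpoint('', 'Worker.run', 5, True, '')   # 5 lines into the scope
#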
def disable_breakpoint(self, id_list, fAll):
"""
Disable breakpoints
id_list - (Optional) A list of breakpoint ids.
fAll - disable all breakpoints regardless of id_list.
"""
return self.__smi.disable_breakpoint(id_list, fAll)
def enable_breakpoint(self, id_list, fAll):
"""
Enable breakpoints
id_list - (Optional) A list of breakpoint ids.
fAll - enable all breakpoints regardless of id_list.
"""
return self.__smi.enable_breakpoint(id_list, fAll)
def delete_breakpoint(self, id_list, fAll):
"""
Delete breakpoints
id_list - (Optional) A list of breakpoint ids.
fAll - delete all breakpoints regardless of id_list.
"""
return self.__smi.delete_breakpoint(id_list, fAll)
def get_breakpoints(self):
"""
Return breakpoints in a dictionary of id keys to CBreakPoint values
"""
return self.__smi.get_breakpoints()
def save_breakpoints(self, _filename = ''):
"""
Save breakpoints to file, locally (on the client side)
"""
return self.__smi.save_breakpoints(_filename)
def load_breakpoints(self, _filename = ''):
"""
Load breakpoints from file, locally (on the client side)
"""
return self.__smi.load_breakpoints(_filename)
def set_trap_unhandled_exceptions(self, ftrap):
"""
Set trap-unhandled-exceptions mode.
ftrap with a value of False means unhandled exceptions will be ignored.
The session manager default is True.
"""
return self.__smi.set_trap_unhandled_exceptions(ftrap)
def get_trap_unhandled_exceptions(self):
"""
Get trap-unhandled-exceptions mode.
"""
return self.__smi.get_trap_unhandled_exceptions()
def set_fork_mode(self, ffork_into_child, ffork_auto):
"""
Determine how to handle os.fork().
ffork_into_child - True|False - If True, the debugger will debug the
child process after a fork, otherwise the debugger will continue
to debug the parent process.
ffork_auto - True|False - If True, the debugger will not pause before
a fork and will automatically make a decision based on the
value of the ffork_into_child flag.
"""
return self.__smi.set_fork_mode(ffork_into_child, ffork_auto)
def get_fork_mode(self):
"""
Return the fork mode in the form of a (ffork_into_child, ffork_auto)
flags tuple.
"""
return self.__smi.get_fork_mode()
def get_stack(self, tid_list, fAll):
return self.__smi.get_stack(tid_list, fAll)
def get_source_file(self, filename, lineno, nlines):
filename = as_unicode(filename, fstrict = True)
return self.__smi.get_source_file(filename, lineno, nlines)
def get_source_lines(self, nlines, fAll):
return self.__smi.get_source_lines(nlines, fAll)
def set_frame_index(self, frame_index):
"""
Set frame index. 0 is the current executing frame, and 1, 2, 3,
are deeper into the stack.
"""
return self.__smi.set_frame_index(frame_index)
def get_frame_index(self):
"""
Get frame index. 0 is the current executing frame, and 1, 2, 3,
are deeper into the stack.
"""
return self.__smi.get_frame_index()
def set_analyze(self, fAnalyze):
"""
Toggle analyze mode. In analyze mode the stack switches to the
exception stack for examination.
"""
return self.__smi.set_analyze(fAnalyze)
def set_host(self, host):
"""
Set host to the specified host (string) for attaching to debuggees on
that host. host can be a host name or an IP address in string form.
"""
return self.__smi.set_host(host)
def get_host(self):
return self.__smi.get_host()
def calc_server_list(self):
"""
Calculate the list of servers (debuggable scripts) on the specified host.
Returns a tuple of a list and a dictionary.
The list is a list of CServerInfo objects sorted by their age
ordered oldest last.
The dictionary is a dictionary of errors that were encountered
during the building of the list. The dictionary has error (exception)
types as keys and the number of occurrences as values.
"""
return self.__smi.calc_server_list()
def get_server_info(self):
"""
Return CServerInfo server info object that corresponds to the
server (debugged script) to which the session manager is
attached.
"""
return self.__smi.get_server_info()
def get_namespace(self, nl, filter_level, repr_limit = 128, fFilter = "DEPRECATED"):
"""
get_namespace is designed for locals/globals panes that let
the user inspect a namespace tree in GUI debuggers such as Winpdb.
You can study the way it is used in Winpdb.
nl - List of tuples, where each tuple is made of a python expression
as a string and a flag that controls whether to "expand" the
value, that is, to return its children as well in case it has
children e.g. lists, dictionaries, etc...
filter_level - 0, 1, or 2. Filter out methods and functions from
classes and objects. (0 - None, 1 - Medium, 2 - Maximum).
repr_limit - Length limit (approximated) to be imposed on repr() of
returned items.
examples of expression lists:
[('x', False), ('y', False)]
[('locals()', True)]
[('a.b.c', False), ('my_object.foo', False), ('another_object', True)]
Return value is a list of dictionaries, where every element
in the list corresponds to an element in the input list 'nl'.
Each dictionary has the following keys and values:
DICT_KEY_EXPR - the original expression string.
DICT_KEY_REPR - A repr of the evaluated value of the expression.
DICT_KEY_IS_VALID - A boolean that indicates if the repr value is
valid for the purpose of re-evaluation.
DICT_KEY_TYPE - A string representing the type of the expression's
evaluated value.
DICT_KEY_N_SUBNODES - If the evaluated value has children like items
in a list or in a dictionary or members of a class,
etc, this key will have their number as value.
DICT_KEY_SUBNODES - If the evaluated value has children and the
"expand" flag was set for this expression, then the
value of this key will be a list of dictionaries as
described below.
DICT_KEY_ERROR - If an error prevented evaluation of this expression
the value of this key will be a repr of the
exception info: repr(sys.exc_info())
Each dictionary for child items has the following keys and values:
DICT_KEY_EXPR - The Python expression that designates this child.
e.g. 'my_list[0]' designates the first child of the
list 'my_list'
DICT_KEY_NAME - a repr of the child name, e.g '0' for the first item
in a list.
DICT_KEY_REPR - A repr of the evaluated value of the expression.
DICT_KEY_IS_VALID - A boolean that indicates if the repr value is
valid for the purpose of re-evaluation.
DICT_KEY_TYPE - A string representing the type of the expression's
evaluated value.
DICT_KEY_N_SUBNODES - If the evaluated value has children like items
in a list or in a dictionary or members of a class,
etc, this key will have their number as value.
"""
if fFilter != "DEPRECATED":
filter_level = fFilter
filter_level = int(filter_level)
return self.__smi.get_namespace(nl, filter_level, repr_limit)
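# Illustrative sketch (not part of the original rpdb2 source): querying a
# namespace tree as described above. The session manager 'sm' and the
# inspected expressions are hypothetical.
#
#     nl = [('locals()', True), ('self.m_count', False)]
#     for d in sm.get_namespace(nl, filter_level = 0):
#         print(d[DICT_KEY_EXPR], d[DICT_KEY_TYPE], d[DICT_KEY_REPR])
#         for child in d.get(DICT_KEY_SUBNODES, []):
#             print('  ', child[DICT_KEY_NAME], child[DICT_KEY_REPR])
#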
#
# REVIEW: remove warning item.
#
def evaluate(self, expr):
"""
Evaluate a python expression in the context of the current thread
and frame.
Return value is a tuple (v, w, e) where v is a repr of the evaluated
expression value, w is always '', and e is an error string if an error
occurred.
NOTE: This call might not return since debugged script logic can lead
to temporary locking or even deadlocking.
"""
expr = as_unicode(expr, fstrict = True)
return self.__smi.evaluate(expr)
def execute(self, suite):
"""
Execute a python statement in the context of the current thread
and frame.
Return value is a tuple (w, e) where w and e are warning and
error strings (respectively) if an error occurred.
NOTE: This call might not return since debugged script logic can lead
to temporary locking or even deadlocking.
"""
suite = as_unicode(suite, fstrict = True)
return self.__smi.execute(suite)
def complete_expression(self, expr):
"""
Return matching completions for expression.
Accepted expressions are of the form a.b.c
Dictionary lookups or function calls are not evaluated. For
example: 'getobject().complete' or 'dict[item].complete' are
not processed.
On the other hand partial expressions and statements are
accepted. For example: 'foo(arg1, arg2.member.complete' will
be accepted and the completion for 'arg2.member.complete' will
be calculated.
Completions are returned as a tuple of two items. The first item
is a prefix to expr and the second item is a list of completions.
For example if expr is 'foo(self.comp' the returned tuple can
be ('foo(self.', ['complete', 'completion', etc...])
"""
expr = as_unicode(expr, fstrict = True)
return self.__smi.complete_expression(expr)
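# Illustrative sketch (not part of the original rpdb2 source): the
# prefix/completions tuple returned by complete_expression(). The expression
# and completions shown are hypothetical.
#
#     prefix, completions = sm.complete_expression('foo(self.com')
#     candidates = [prefix + c for c in completions]
#     # e.g. ['foo(self.complete', 'foo(self.completion', ...]
#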
def set_encoding(self, encoding, fraw = False):
"""
Set the encoding that will be used as source encoding for execute()
evaluate() commands and in strings returned by get_namespace().
The encoding value can be either 'auto' or any encoding accepted by
the codecs module. If 'auto' is specified, the encoding used will be
the source encoding of the active scope, which is utf-8 by default.
The default encoding value is 'auto'.
If fraw is True, strings returned by evaluate() and get_namespace()
will represent non-ASCII characters as an escape sequence.
"""
return self.__smi.set_encoding(encoding, fraw)
def get_encoding(self):
"""
return the (encoding, fraw) tuple.
"""
return self.__smi.get_encoding()
def set_synchronicity(self, fsynchronicity):
"""
Set the synchronicity mode.
Traditional Python debuggers that use the inspected thread (usually
the main thread) to query or modify the script name-space have to
wait until the script hits a break-point. Synchronicity allows the
debugger to query and modify the script name-space even if its
threads are still running or blocked in C library code by using
special worker threads. In some rare cases querying or modifying data
in synchronicity can crash the script. For example in some Linux
builds of wxPython querying the state of wx objects from a thread
other than the GUI thread can crash the script. If this happens or
if you want to restrict these operations to the inspected thread,
turn synchronicity off.
On the other hand when synchronicity is off it is possible to
accidentally deadlock or block indefinitely the script threads by
querying or modifying particular data structures.
The default is on (True).
"""
return self.__smi.set_synchronicity(fsynchronicity)
def get_synchronicity(self):
return self.__smi.get_synchronicity()
def get_state(self):
"""
Get the session manager state. Return one of the STATE_* constants
defined below, for example STATE_DETACHED, STATE_BROKEN, etc...
"""
return self.__smi.get_state()
#
# REVIEW: Improve data structure.
#
def get_thread_list(self):
return self.__smi.get_thread_list()
def set_thread(self, tid):
"""
Set the focused thread to the specified thread.
tid - either the OS thread id or the zero based index of the thread
in the thread list returned by get_thread_list().
"""
return self.__smi.set_thread(tid)
def set_password(self, _rpdb2_pwd):
"""
Set the password that will govern the authentication and encryption
of client-server communication.
"""
_rpdb2_pwd = as_unicode(_rpdb2_pwd, fstrict = True)
return self.__smi.set_password(_rpdb2_pwd)
def get_password(self):
"""
Get the password that governs the authentication and encryption
of client-server communication.
"""
return self.__smi.get_password()
def get_encryption(self):
"""
Get the encryption mode. Return True if unencrypted connections are
not allowed. When launching a new debuggee the debuggee will inherit
the encryption mode. The encryption mode can be set via command-line
only.
"""
return self.__smi.get_encryption()
def set_remote(self, fAllowRemote):
"""
Set the remote-connections mode. if True, connections from remote
machine are allowed. When launching a new debuggee the debuggee will
inherit this mode. This mode is only relevant to the debuggee.
"""
return self.__smi.set_remote(fAllowRemote)
def get_remote(self):
"""
Get the remote-connections mode. Return True if connections from
remote machine are allowed. When launching a new debuggee the
debuggee will inherit this mode. This mode is only relevant to the
debuggee.
"""
return self.__smi.get_remote()
def set_environ(self, envmap):
"""
Set the environment variables mapping. This mapping is used
when a new script is launched to modify its environment.
Example for a mapping on Windows: [('Path', '%Path%;c:\\mydir')]
Example for a mapping on Linux: [('PATH', '$PATH:~/mydir')]
The mapping should be a list of tuples where each tuple is
composed of a key and a value. Keys and Values must be either
strings or Unicode strings. Other types will raise the BadArgument
exception.
Invalid arguments will be silently ignored.
"""
return self.__smi.set_environ(envmap)
def get_environ(self):
"""
Return the current environment mapping.
"""
return self.__smi.get_environ()
def stop_debuggee(self):
"""
Stop the debuggee immediately.
"""
return self.__smi.stop_debuggee()
class CConsole:
"""
Interface to a debugger console.
"""
def __init__(
self,
session_manager,
stdin = None,
stdout = None,
fSplit = False
):
"""
Constructor of CConsole
session_manager - session manager object.
stdin, stdout - redirection for IO.
fSplit - Set the flag to True when input and output belong to different
panes. For example take a look at Winpdb.
"""
self.m_ci = CConsoleInternal(
session_manager,
stdin,
stdout,
fSplit
)
def start(self):
return self.m_ci.start()
def join(self):
"""
Wait until the console ends.
"""
return self.m_ci.join()
def set_filename(self, filename):
"""
Set current filename for the console. The current filename can change
from outside the console when the console is embedded in other
components, for example take a look at Winpdb.
"""
filename = as_unicode(filename)
return self.m_ci.set_filename(filename)
def complete(self, text, state):
"""
Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
text = as_unicode(text)
return self.m_ci.complete(text, state)
def printer(self, text):
text = as_unicode(text)
return self.m_ci.printer(text)
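#
# Illustrative sketch (not part of the original rpdb2 source): wiring a
# session manager to an interactive console, roughly the pattern the
# docstrings above attribute to StartClient(). The password is hypothetical.
#
#     sm = CSessionManager('some-password', fAllowUnencrypted = True,
#                          fAllowRemote = False, host = LOCALHOST)
#     console = CConsole(sm)
#     console.start()
#     console.join()     # block until the user quits the console
#     sm.shutdown()
#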
#
# ---------------------------- Exceptions ----------------------------------
#
class CException(Exception):
"""
Base exception class for the debugger.
"""
def __init__(self, *args):
Exception.__init__(self, *args)
class BadMBCSPath(CException):
"""
Raised on Windows systems when the python executable or debugger script
path can not be encoded with the file system code page. This means that
the Windows code page is misconfigured.
"""
class NotPythonSource(CException):
"""
Raised when an attempt to load non-Python source is made.
"""
class InvalidScopeName(CException):
"""
Invalid scope name.
This exception might be thrown when a request was made to set a breakpoint
to an unknown scope.
"""
class BadArgument(CException):
"""
Bad Argument.
"""
class ThreadNotFound(CException):
"""
Thread not found.
"""
class NoThreads(CException):
"""
No Threads.
"""
class ThreadDone(CException):
"""
Thread Done.
"""
class DebuggerNotBroken(CException):
"""
Debugger is not broken.
This exception is thrown when an operation that can only be performed
while the debuggee is broken, is requested while the debuggee is running.
"""
class InvalidFrame(CException):
"""
Invalid Frame.
This exception is raised if an operation is requested on a stack frame
that does not exist.
"""
class NoExceptionFound(CException):
"""
No Exception Found.
This exception is raised when exception information is requested, but no
exception is found, or has been thrown.
"""
class CConnectionException(CException):
def __init__(self, *args):
CException.__init__(self, *args)
class FirewallBlock(CConnectionException):
"""Firewall is blocking socket communication."""
class BadVersion(CConnectionException):
"""Bad Version."""
def __init__(self, version):
CConnectionException.__init__(self)
self.m_version = version
def __str__(self):
return repr(self.m_version)
class UnexpectedData(CConnectionException):
"""Unexpected data."""
class AlreadyAttached(CConnectionException):
"""Already Attached."""
class NotAttached(CConnectionException):
"""Not Attached."""
class SpawnUnsupported(CConnectionException):
"""Spawn Unsupported."""
class UnknownServer(CConnectionException):
"""Unknown Server."""
class CSecurityException(CConnectionException):
def __init__(self, *args):
CConnectionException.__init__(self, *args)
class UnsetPassword(CSecurityException):
"""Unset Password."""
class EncryptionNotSupported(CSecurityException):
"""Encryption Not Supported."""
class EncryptionExpected(CSecurityException):
"""Encryption Expected."""
class DecryptionFailure(CSecurityException):
"""Decryption Failure."""
class AuthenticationBadData(CSecurityException):
"""Authentication Bad Data."""
class AuthenticationFailure(CSecurityException):
"""Authentication Failure."""
class AuthenticationBadIndex(CSecurityException):
"""Authentication Bad Index."""
def __init__(self, max_index = 0, anchor = 0):
CSecurityException.__init__(self)
self.m_max_index = max_index
self.m_anchor = anchor
def __str__(self):
return repr((self.m_max_index, self.m_anchor))
#
#----------------- unicode handling for compatibility with py3k ----------------
#
def is_py3k():
return sys.version_info[0] >= 3
def is_unicode(s):
if is_py3k() and type(s) == str:
return True
if type(s) == unicode:
return True
return False
def as_unicode(s, encoding = 'utf-8', fstrict = False):
if is_unicode(s):
return s
if fstrict:
u = s.decode(encoding)
else:
u = s.decode(encoding, 'replace')
return u
def as_string(s, encoding = 'utf-8', fstrict = False):
if is_py3k():
if is_unicode(s):
return s
if fstrict:
e = s.decode(encoding)
else:
e = s.decode(encoding, 'replace')
return e
if not is_unicode(s):
return s
if fstrict:
e = s.encode(encoding)
else:
e = s.encode(encoding, 'replace')
return e
def as_bytes(s, encoding = 'utf-8', fstrict = True):
if not is_unicode(s):
return s
if fstrict:
b = s.encode(encoding)
else:
b = s.encode(encoding, 'replace')
return b
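#
# Illustrative sketch (not part of the original rpdb2 source): how the three
# helpers above behave. On Python 3, as_unicode() and as_string() both yield
# str while as_bytes() encodes; on Python 2, as_string() returns a byte
# string and as_unicode() a unicode object.
#
#     u = as_unicode(b'caf\xc3\xa9')        # decoded with utf-8 -> u'caf\xe9'
#     b = as_bytes(u)                       # encoded back -> b'caf\xc3\xa9'
#     s = as_string(b)                      # native str for the running Python
#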
#
#----------------------- Infinite List of Globals ---------------------------
#
#
# According to PEP-8: "Use 4 spaces per indentation level."
#
PYTHON_TAB_WIDTH = 4
GNOME_DEFAULT_TERM = 'gnome-terminal'
NT_DEBUG = 'nt_debug'
SCREEN = 'screen'
MAC = 'mac'
DARWIN = 'darwin'
POSIX = 'posix'
#
# Map between OS type and relevant command to initiate a new OS console.
# entries for other OSs can be added here.
# '%s' serves as a placeholder.
#
# Currently there is no difference between 'nt' and NT_DEBUG, since now
# both of them leave the terminal open after termination of debuggee to
# accommodate scenarios of scripts with child processes.
#
osSpawn = {
'nt': 'start "rpdb2 - Version ' + get_version() + ' - Debuggee Console" cmd.exe /K ""%(exec)s" %(options)s"',
NT_DEBUG: 'start "rpdb2 - Version ' + get_version() + ' - Debuggee Console" cmd.exe /K ""%(exec)s" %(options)s"',
POSIX: "%(term)s -e %(shell)s -c '%(exec)s %(options)s; %(shell)s' &",
'Terminal': "Terminal --disable-server -x %(shell)s -c '%(exec)s %(options)s; %(shell)s' &",
GNOME_DEFAULT_TERM: "gnome-terminal --disable-factory -x %(shell)s -c '%(exec)s %(options)s; %(shell)s' &",
MAC: '%(exec)s %(options)s',
DARWIN: '%(exec)s %(options)s',
SCREEN: 'screen -t debuggee_console %(exec)s %(options)s'
}
RPDBTERM = 'RPDBTERM'
COLORTERM = 'COLORTERM'
TERM = 'TERM'
KDE_PREFIX = 'KDE'
GNOME_PREFIX = 'GNOME'
KDE_DEFAULT_TERM_QUERY = "kreadconfig --file kdeglobals --group General --key TerminalApplication --default konsole"
XTERM = 'xterm'
RXVT = 'rxvt'
RPDB_SETTINGS_FOLDER = '.rpdb2_settings'
RPDB_PWD_FOLDER = os.path.join(RPDB_SETTINGS_FOLDER, 'passwords')
RPDB_BPL_FOLDER = os.path.join(RPDB_SETTINGS_FOLDER, 'breakpoints')
RPDB_BPL_FOLDER_NT = 'rpdb2_breakpoints'
MAX_BPL_FILES = 100
EMBEDDED_SYNC_THRESHOLD = 1.0
EMBEDDED_SYNC_TIMEOUT = 5.0
HEARTBEAT_TIMEOUT = 16
IDLE_MAX_RATE = 2.0
PING_TIMEOUT = 4.0
LOCAL_TIMEOUT = 1.0
COMMUNICATION_RETRIES = 5
WAIT_FOR_BREAK_TIMEOUT = 3.0
SHUTDOWN_TIMEOUT = 4.0
STARTUP_TIMEOUT = 3.0
STARTUP_RETRIES = 3
LOOPBACK = '127.0.0.1'
LOCALHOST = 'localhost'
SERVER_PORT_RANGE_START = 51000
SERVER_PORT_RANGE_LENGTH = 24
SOURCE_EVENT_CALL = 'C'
SOURCE_EVENT_LINE = 'L'
SOURCE_EVENT_RETURN = 'R'
SOURCE_EVENT_EXCEPTION = 'E'
SOURCE_STATE_UNBROKEN = '*'
SOURCE_BP_ENABLED = 'B'
SOURCE_BP_DISABLED = 'D'
SYMBOL_MARKER = '>'
SYMBOL_ALL = '*'
SOURCE_MORE = '+'
SOURCE_LESS = '-'
SOURCE_ENTIRE_FILE = '^'
CONSOLE_PRINTER = '*** '
CONSOLE_WRAP_INDEX = 78
CONSOLE_PROMPT = '\n> '
CONSOLE_PROMPT_ANALYZE = '\nAnalyze> '
CONSOLE_INTRO = ("""RPDB2 - The Remote Python Debugger, version %s,
Copyright (C) 2005-2009 Nir Aides.
Type "help", "copyright", "license", "credits" for more information.""" % (RPDB_VERSION))
PRINT_NOTICE_PROMPT = "Hit Return for more, or q (and Return) to quit:"
PRINT_NOTICE_LINES_PER_SECTION = 20
STR_NO_THREADS = "Operation failed since no traced threads were found."
STR_STARTUP_NOTICE = "Attaching to debuggee..."
STR_SPAWN_UNSUPPORTED = "The debugger does not know how to open a new console on this system. You can start the debuggee manually with the -d flag on a separate console and then use the 'attach' command to attach to it."
STR_SPAWN_UNSUPPORTED_SCREEN_SUFFIX = """Alternatively, you can use the screen utility and invoke rpdb2 in screen mode with the -s command-line flag as follows:
screen rpdb2 -s some-script.py script-arg1 script-arg2..."""
STR_AUTOMATIC_LAUNCH_UNKNOWN = STR_SPAWN_UNSUPPORTED
STR_STARTUP_SPAWN_NOTICE = "Starting debuggee..."
STR_KILL_NOTICE = "Stopping debuggee..."
STR_STARTUP_FAILURE = "Debuggee failed to start in a timely manner."
STR_OUTPUT_WARNING = "Textual output will be done at the debuggee."
STR_OUTPUT_WARNING_ASYNC = "The operation will continue to run in the background."
STR_ANALYZE_GLOBALS_WARNING = "In analyze mode the globals and locals dictionaries are read only."
STR_BREAKPOINTS_LOADED = "Breakpoints were loaded."
STR_BREAKPOINTS_SAVED = "Breakpoints were saved."
STR_BREAKPOINTS_SAVE_PROBLEM = "A problem occurred while saving the breakpoints."
STR_BREAKPOINTS_LOAD_PROBLEM = "A problem occurred while loading the breakpoints."
STR_BREAKPOINTS_NOT_SAVED = "Breakpoints were not saved."
STR_BREAKPOINTS_NOT_LOADED = "Breakpoints were not loaded."
STR_BREAKPOINTS_FILE_NOT_FOUND = "Breakpoints file was not found."
STR_BREAKPOINTS_NOT_FOUND = "No Breakpoints were found."
STR_BAD_FILENAME = "Bad File Name."
STR_SOME_BREAKPOINTS_NOT_LOADED = "Some breakpoints were not loaded, because of an error."
STR_BAD_EXPRESSION = "Bad expression '%s'."
STR_FILE_NOT_FOUND = "File '%s' not found."
STR_DISPLAY_ERROR = """If the X server (Windowing system) is not started you need to use rpdb2 with the screen utility and invoke rpdb2 in screen mode with the -s command-line flag as follows:
screen rpdb2 -s some-script.py script-arg1 script-arg2..."""
STR_EXCEPTION_NOT_FOUND = "No exception was found."
STR_SCOPE_NOT_FOUND = "Scope '%s' not found."
STR_NO_SUCH_BREAKPOINT = "Breakpoint not found."
STR_THREAD_NOT_FOUND = "Thread was not found."
STR_NO_THREADS_FOUND = "No threads were found."
STR_THREAD_NOT_BROKEN = "Thread is running."
STR_THREAD_FOCUS_SET = "Focus was set to chosen thread."
STR_ILEGAL_ANALYZE_MODE_ARG = "Argument is not allowed in analyze mode. Type 'help analyze' for more info."
STR_ILEGAL_ANALYZE_MODE_CMD = "Command is not allowed in analyze mode. Type 'help analyze' for more info."
STR_ANALYZE_MODE_TOGGLE = "Analyze mode was set to: %s."
STR_BAD_ARGUMENT = "Bad Argument."
STR_BAD_SYNTAX = 'Unknown syntax: %s\nDid you forget to use the exec or eval console commands?'
STR_PSYCO_WARNING = "The psyco module was detected. The debugger is incompatible with the psyco module and will not function correctly as long as the psyco module is imported and used."
STR_CONFLICTING_MODULES = "The modules: %s, which are incompatible with the debugger were detected and can possibly cause the debugger to fail."
STR_SIGNAL_INTERCEPT = "The signal %s(%d) was intercepted inside debugger tracing logic. It will be held pending until the debugger continues. Any exceptions raised by the handler will be ignored!"
STR_SIGNAL_EXCEPTION = "Exception %s raised by handler of signal %s(%d) inside debugger tracing logic was ignored!"
STR_DEBUGGEE_TERMINATED = "Debuggee has terminated."
STR_DEBUGGEE_NOT_BROKEN = "Debuggee has to be waiting at break point to complete this command."
STR_DEBUGGER_HAS_BROKEN = "Debuggee is waiting at break point for further commands."
STR_ALREADY_ATTACHED = "Already attached. Detach from debuggee and try again."
STR_NOT_ATTACHED = "Not attached to any script. Attach to a script and try again."
STR_COMMUNICATION_FAILURE = "Failed to communicate with debugged script."
STR_ERROR_OTHER = "Command returned the following error:\n%(type)s, %(value)s.\nPlease check stderr for stack trace and report to support."
STR_BAD_MBCS_PATH = "The debugger can not launch the script since the path to the Python executable or the debugger scripts can not be encoded into the default system code page. Please check the settings of 'Language for non-Unicode programs' in the Advanced tab of the Windows Regional and Language Options dialog."
STR_LOST_CONNECTION = "Lost connection to debuggee."
STR_FIREWALL_BLOCK = "A firewall is blocking the local communication channel (socket) that is required between the debugger and the debugged script. Please make sure that the firewall allows that communication."
STR_BAD_VERSION = "A debuggee was found with incompatible debugger version %(value)s."
STR_BAD_VERSION2 = "While attempting to find the specified debuggee at least one debuggee was found that uses incompatible version of RPDB2."
STR_UNEXPECTED_DATA = "Unexpected data received."
STR_ACCESS_DENIED = "While attempting to find debuggee, at least one debuggee denied connection because of mismatched passwords. Please verify your password."
STR_ACCESS_DENIED2 = "Communication is denied because of mismatched passwords."
STR_ENCRYPTION_EXPECTED = "While attempting to find debuggee, at least one debuggee denied connection since it accepts encrypted connections only."
STR_ENCRYPTION_EXPECTED2 = "Debuggee will only talk over an encrypted channel."
STR_DECRYPTION_FAILURE = "Bad packet was received by the debuggee."
STR_DEBUGGEE_NO_ENCRYPTION = "Debuggee does not support encrypted mode. Either install the python-crypto package on the debuggee machine or allow unencrypted connections."
STR_RANDOM_PASSWORD = "Password has been set to a random password."
STR_PASSWORD_INPUT = "Please type a password:"
STR_PASSWORD_CONFIRM = "Password has been set."
STR_PASSWORD_NOT_SUPPORTED = "The --pwd flag is only supported on NT systems."
STR_PASSWORD_MUST_BE_SET = "A password should be set to secure debugger client-server communication."
STR_BAD_DATA = "Bad data received from debuggee."
STR_BAD_FILE_DATA = "Bad data received from file."
STR_ATTACH_FAILED = "Failed to attach"
STR_ATTACH_FAILED_NAME = "Failed to attach to '%s'."
STR_ATTACH_CRYPTO_MODE = "Debug Channel is%s encrypted."
STR_ATTACH_CRYPTO_MODE_NOT = "NOT"
STR_ATTACH_SUCCEEDED = "Successfully attached to '%s'."
STR_ATTEMPTING_TO_STOP = "Requesting script to stop."
STR_ATTEMPTING_TO_DETACH = "Detaching from script..."
STR_DETACH_SUCCEEDED = "Detached from script."
STR_DEBUGGEE_UNKNOWN = "Failed to find script."
STR_MULTIPLE_DEBUGGEES = "WARNING: There is more than one debuggee '%s'."
MSG_ERROR_HOST_TEXT = """The debugger was not able to set the host to '%s'.
The following error was returned:
%s"""
STR_SOURCE_NOT_FOUND = "Failed to get source from debuggee."
STR_SCRIPTS_CONNECTING = "Connecting to '%s'..."
STR_SCRIPTS_NO_SCRIPTS = "No scripts to debug on '%s'"
STR_SCRIPTS_TO_DEBUG = """Scripts to debug on '%s':
pid name
--------------------------"""
STR_STACK_TRACE = """Stack trace for thread %d:
Frame File Name Line Function
------------------------------------------------------------------------------"""
STR_SOURCE_LINES = """Source lines for thread %d from file '%s':
"""
STR_ACTIVE_THREADS = """List of active threads known to the debugger:
No Tid Name State
-----------------------------------------------"""
STR_BREAKPOINTS_LIST = """List of breakpoints:
Id State Line Filename-Scope-Condition-Encoding
------------------------------------------------------------------------------"""
STR_BREAKPOINTS_TEMPLATE = """ %2d %-8s %5d %s
%s
%s
%s"""
STR_ENCRYPTION_SUPPORT_ERROR = "Encryption is not supported since the python-crypto package was not found. Either install the python-crypto package or allow unencrypted connections."
STR_PASSWORD_NOT_SET = 'Password is not set.'
STR_PASSWORD_SET = 'Password is set to: "%s"'
STR_PASSWORD_BAD = 'The password should begin with a letter and continue with any combination of digits, letters or underscores (\'_\'). Only English characters are accepted for letters.'
STR_ENCRYPT_MODE = 'Force encryption mode: %s'
STR_REMOTE_MODE = 'Allow remote machines mode: %s'
STR_ENCODING_MODE = 'Encoding is set to: %s'
STR_ENCODING_MODE_SET = 'Encoding was set to: %s'
STR_ENCODING_BAD = 'The specified encoding was not recognized by the debugger.'
STR_ENVIRONMENT = 'The current environment mapping is:'
STR_ENVIRONMENT_EMPTY = 'The current environment mapping is not set.'
STR_SYNCHRONICITY_BAD = "Can not process command when thread is running unless synchronicity mode is turned on. Type 'help synchro' at the command prompt for more information."
STR_SYNCHRONICITY_MODE = 'The synchronicity mode is set to: %s'
STR_TRAP_MODE = 'Trap unhandled exceptions mode is set to: %s'
STR_TRAP_MODE_SET = "Trap unhandled exceptions mode was set to: %s."
STR_FORK_MODE = "Fork mode is set to: %s, %s."
STR_FORK_MODE_SET = "Fork mode was set to: %s, %s."
STR_LOCAL_NAMESPACE_WARNING = 'Debugger modifications to the original bindings of the local namespace of this frame will be committed before the execution of the next statement of the frame. Any code using these variables executed before that point will see the original values.'
STR_WARNING = 'Warning: %s'
STR_MAX_NAMESPACE_WARNING_TITLE = 'Namespace Warning'
STR_MAX_NAMESPACE_WARNING_TYPE = '*** WARNING ***'
STR_MAX_NAMESPACE_WARNING_MSG = 'Number of items exceeds capacity of namespace browser.'
STR_MAX_EVALUATE_LENGTH_WARNING = 'Output length exceeds maximum capacity.'
FORK_CHILD = 'child'
FORK_PARENT = 'parent'
FORK_MANUAL = 'manual'
FORK_AUTO = 'auto'
ENCRYPTION_ENABLED = 'encrypted'
ENCRYPTION_DISABLED = 'plain-text'
STATE_ENABLED = 'enabled'
STATE_DISABLED = 'disabled'
BREAKPOINTS_FILE_EXT = '.bpl'
PYTHON_FILE_EXTENSION = '.py'
PYTHONW_FILE_EXTENSION = '.pyw'
PYTHONW_SO_EXTENSION = '.so'
PYTHON_EXT_LIST = ['.py', '.pyw', '.pyc', '.pyd', '.pyo', '.so']
MODULE_SCOPE = '?'
MODULE_SCOPE2 = '<module>'
BLENDER_SOURCE_NOT_AVAILABLE = as_unicode('Blender script source code is not available.')
SOURCE_NOT_AVAILABLE = as_unicode('Source code is not available.')
SCOPE_SEP = '.'
BP_FILENAME_SEP = ':'
BP_EVAL_SEP = ','
DEBUGGER_FILENAME = 'rpdb2.py'
THREADING_FILENAME = 'threading.py'
STR_STATE_BROKEN = 'waiting at break point'
STATE_BROKEN = 'broken'
STATE_RUNNING = 'running'
STATE_ANALYZE = 'analyze'
STATE_DETACHED = 'detached'
STATE_DETACHING = 'detaching'
STATE_SPAWNING = 'spawning'
STATE_ATTACHING = 'attaching'
DEFAULT_NUMBER_OF_LINES = 20
DICT_KEY_TID = 'tid'
DICT_KEY_STACK = 'stack'
DICT_KEY_CODE_LIST = 'code_list'
DICT_KEY_CURRENT_TID = 'current tid'
DICT_KEY_BROKEN = 'broken'
DICT_KEY_BREAKPOINTS = 'breakpoints'
DICT_KEY_LINES = 'lines'
DICT_KEY_FILENAME = 'filename'
DICT_KEY_FIRST_LINENO = 'first_lineno'
DICT_KEY_FRAME_LINENO = 'frame_lineno'
DICT_KEY_EVENT = 'event'
DICT_KEY_EXPR = 'expr'
DICT_KEY_NAME = 'name'
DICT_KEY_REPR = 'repr'
DICT_KEY_IS_VALID = 'fvalid'
DICT_KEY_TYPE = 'type'
DICT_KEY_SUBNODES = 'subnodes'
DICT_KEY_N_SUBNODES = 'n_subnodes'
DICT_KEY_ERROR = 'error'
RPDB_EXEC_INFO = as_unicode('rpdb_exception_info')
MODE_ON = 'ON'
MODE_OFF = 'OFF'
ENCODING_UTF8_PREFIX_1 = '\xef\xbb\xbf'
ENCODING_SOURCE = '# -*- coding: %s -*-\n'
ENCODING_AUTO = as_unicode('auto')
ENCODING_RAW = as_unicode('raw')
ENCODING_RAW_I = as_unicode('__raw')
MAX_EVALUATE_LENGTH = 256 * 1024
MAX_NAMESPACE_ITEMS = 1024
MAX_SORTABLE_LENGTH = 256 * 1024
REPR_ID_LENGTH = 4096
MAX_NAMESPACE_WARNING = {
DICT_KEY_EXPR: STR_MAX_NAMESPACE_WARNING_TITLE,
DICT_KEY_NAME: STR_MAX_NAMESPACE_WARNING_TITLE,
DICT_KEY_REPR: STR_MAX_NAMESPACE_WARNING_MSG,
DICT_KEY_IS_VALID: False,
DICT_KEY_TYPE: STR_MAX_NAMESPACE_WARNING_TYPE,
DICT_KEY_N_SUBNODES: 0
}
MAX_EVENT_LIST_LENGTH = 1000
EVENT_EXCLUDE = 'exclude'
EVENT_INCLUDE = 'include'
INDEX_TABLE_SIZE = 100
DISPACHER_METHOD = 'dispatcher_method'
CONFLICTING_MODULES = ['psyco', 'pdb', 'bdb', 'doctest']
XML_DATA = """<?xml version='1.0'?>
<methodCall>
<methodName>dispatcher_method</methodName>
<params>
<param>
<value><string>%s</string></value>
</param>
</params>
</methodCall>""" % RPDB_COMPATIBILITY_VERSION
N_WORK_QUEUE_THREADS = 8
DEFAULT_PATH_SUFFIX_LENGTH = 55
ELLIPSIS_UNICODE = as_unicode('...')
ELLIPSIS_BYTES = as_bytes('...')
ERROR_NO_ATTRIBUTE = 'Error: No attribute.'
g_server_lock = threading.RLock()
g_server = None
g_debugger = None
g_fScreen = False
g_fDefaultStd = True
#
# In debug mode errors and tracebacks are printed to stdout
#
g_fDebug = False
#
# Lock for the traceback module to prevent it from interleaving
# output from different threads.
#
g_traceback_lock = threading.RLock()
g_source_provider_aux = None
g_lines_cache = {}
g_initial_cwd = []
g_error_mapping = {
socket.error: STR_COMMUNICATION_FAILURE,
CConnectionException: STR_LOST_CONNECTION,
FirewallBlock: STR_FIREWALL_BLOCK,
BadVersion: STR_BAD_VERSION,
UnexpectedData: STR_UNEXPECTED_DATA,
SpawnUnsupported: STR_SPAWN_UNSUPPORTED,
UnknownServer: STR_DEBUGGEE_UNKNOWN,
UnsetPassword: STR_PASSWORD_MUST_BE_SET,
EncryptionNotSupported: STR_DEBUGGEE_NO_ENCRYPTION,
EncryptionExpected: STR_ENCRYPTION_EXPECTED,
DecryptionFailure: STR_DECRYPTION_FAILURE,
AuthenticationBadData: STR_ACCESS_DENIED,
AuthenticationFailure: STR_ACCESS_DENIED,
BadMBCSPath: STR_BAD_MBCS_PATH,
AlreadyAttached: STR_ALREADY_ATTACHED,
NotAttached: STR_NOT_ATTACHED,
DebuggerNotBroken: STR_DEBUGGEE_NOT_BROKEN,
NoThreads: STR_NO_THREADS,
NoExceptionFound: STR_EXCEPTION_NOT_FOUND,
}
#
# These globals are related to handling the os.fork() os._exit() and exec
# pattern.
#
g_forkpid = None
g_forktid = None
g_fignorefork = False
g_exectid = None
g_execpid = None
g_fos_exit = False
#
# To hold a reference to __main__ to prevent its release if an unhandled
# exception is raised.
#
g_module_main = None
g_found_conflicting_modules = []
g_fignore_atexit = False
g_ignore_broken_pipe = 0
#
# Unicode version of path names that do not encode well with the Windows
# 'mbcs' encoding. This dict is used to work with such path names on
# windows.
#
g_found_unicode_files = {}
g_frames_path = {}
g_signal_handlers = {}
g_signals_pending = []
#g_profile = None
g_fFirewallTest = True
if is_py3k():
g_safe_base64_to = bytes.maketrans(as_bytes('/+='), as_bytes('_-#'))
g_safe_base64_from = bytes.maketrans(as_bytes('_-#'), as_bytes('/+='))
else:
g_safe_base64_to = string.maketrans(as_bytes('/+='), as_bytes('_-#'))
g_safe_base64_from = string.maketrans(as_bytes('_-#'), as_bytes('/+='))
g_alertable_waiters = {}
g_builtins_module = sys.modules.get('__builtin__', sys.modules.get('builtins'))
#
# ---------------------------- General Utils ------------------------------
#
def job_wrapper(event, foo, *args, **kwargs):
try:
#print_debug('Thread %d doing job %s' % (thread.get_ident(), foo.__name__))
foo(*args, **kwargs)
finally:
event.set()
def send_job(tid, timeout, foo, *args, **kwargs):
#
    # Attempt to send a job to thread tid.
    # Raises KeyError if thread tid is not available for jobs.
#
(lock, jobs) = g_alertable_waiters[tid]
event = threading.Event()
f = lambda: job_wrapper(event, foo, *args, **kwargs)
jobs.append(f)
try:
lock.acquire()
lock_notify_all(lock)
finally:
lock.release()
safe_wait(event, timeout)
def alertable_wait(lock, timeout = None):
jobs = []
tid = thread.get_ident()
g_alertable_waiters[tid] = (lock, jobs)
try:
safe_wait(lock, timeout)
while len(jobs) != 0:
job = jobs.pop(0)
try:
job()
except:
pass
if len(jobs) == 0:
time.sleep(0.1)
finally:
del g_alertable_waiters[tid]
def safe_wait(lock, timeout = None):
#
    # Workaround for a Windows bug where signal handlers might raise
    # exceptions even if they return normally.
#
while True:
try:
t0 = time.time()
return lock.wait(timeout)
except:
if timeout == None:
continue
timeout -= (time.time() - t0)
if timeout <= 0:
return
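#
# Illustrative sketch, not part of the debugger: how send_job() and
# alertable_wait() cooperate. A thread that waits with alertable_wait()
# registers itself in g_alertable_waiters, which allows another thread to
# inject a job into it with send_job(). The function below is hypothetical
# and exists only as a usage example; it is never called.
#
def _example_send_job_sketch():
    results = []
    waiter_tid = []
    lock = threading.Condition()
    def _waiter():
        waiter_tid.append(thread.get_ident())
        lock.acquire()
        try:
            #
            # Wait up to 2 seconds while remaining open to injected jobs.
            #
            alertable_wait(lock, 2)
        finally:
            lock.release()
    t = threading.Thread(target = _waiter)
    t.start()
    time.sleep(0.5)
    #
    # Run the job inside the waiting thread; blocks until it completes.
    #
    send_job(waiter_tid[0], 2, results.append, 'done')
    t.join()
    return results == ['done']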
#
# The following code is related to the ability of the debugger
# to work both on Python 2.5 and 3.0.
#
def lock_notify_all(lock):
try:
if is_py3k():
return lock.notify_all()
except AttributeError:
pass
return lock.notifyAll()
def event_is_set(event):
try:
if is_py3k():
return event.is_set()
except AttributeError:
pass
return event.isSet()
def thread_set_daemon(thread, fdaemon):
try:
if is_py3k():
return thread.set_daemon(fdaemon)
except AttributeError:
pass
return thread.setDaemon(fdaemon)
def thread_is_alive(thread):
try:
if is_py3k():
return thread.is_alive()
except AttributeError:
pass
return thread.isAlive()
def thread_set_name(thread, name):
try:
if is_py3k():
return thread.set_name(name)
except AttributeError:
pass
return thread.setName(name)
def thread_get_name(thread):
try:
if is_py3k():
return thread.get_name()
except AttributeError:
pass
return thread.getName()
def current_thread():
try:
if is_py3k():
return threading.current_thread()
except AttributeError:
pass
return threading.currentThread()
class _stub_type:
pass
def _rpdb2_bytes(s, e):
return s.encode(e)
if not hasattr(g_builtins_module, 'unicode'):
unicode = _stub_type
if not hasattr(g_builtins_module, 'long'):
long = _stub_type
if not hasattr(g_builtins_module, 'str8'):
str8 = _stub_type
if not hasattr(g_builtins_module, 'bytearray'):
bytearray = _stub_type
if not hasattr(g_builtins_module, 'bytes'):
bytes = _stub_type
#
# Pickle on Python 2.5 should know how to handle byte strings
# that arrive from Python 3.0 over sockets.
#
g_builtins_module.bytes = _rpdb2_bytes
if is_py3k():
class sets:
Set = _stub_type
BaseSet = _stub_type
ImmutableSet = _stub_type
if sys.version_info[:2] <= (2, 3):
set = sets.Set
def _raw_input(s):
if is_py3k():
return input(s)
i = raw_input(s)
i = as_unicode(i, detect_encoding(sys.stdin), fstrict = True)
return i
def _print(s, f = sys.stdout, feol = True):
s = as_unicode(s)
encoding = detect_encoding(f)
s = as_bytes(s, encoding, fstrict = False)
s = as_string(s, encoding)
if feol:
f.write(s + '\n')
else:
f.write(s)
def detect_encoding(file):
try:
encoding = file.encoding
if encoding == None:
return detect_locale()
except:
return detect_locale()
try:
codecs.lookup(encoding)
return encoding
except:
pass
if encoding.lower().startswith('utf_8'):
return 'utf-8'
return 'ascii'
def detect_locale():
encoding = locale.getdefaultlocale()[1]
if encoding == None:
return 'ascii'
try:
codecs.lookup(encoding)
return encoding
except:
pass
if encoding.lower().startswith('utf_8'):
return 'utf-8'
return 'ascii'
def class_name(c):
s = safe_str(c)
if "'" in s:
s = s.split("'")[1]
assert(s.startswith(__name__ + '.'))
return s
def clip_filename(path, n = DEFAULT_PATH_SUFFIX_LENGTH):
suffix = calc_suffix(path, n)
if not suffix.startswith('...'):
return suffix
index = suffix.find(os.sep)
if index == -1:
return suffix
clip = '...' + suffix[index:]
return clip
def safe_str(x):
try:
return str(x)
except:
return 'N/A'
def safe_repr(x):
try:
return repr(x)
except:
return 'N/A'
def parse_type(t):
rt = safe_repr(t)
if not "'" in rt:
return rt
st = rt.split("'")[1]
return st
def repr_list(pattern, l, length, encoding, is_valid):
length = max(0, length - len(pattern) + 2)
s = ''
index = 0
try:
for i in l:
#
# Remove any trace of session password from data structures that
# go over the network.
#
if type(i) == str and i in ['_rpdb2_args', '_rpdb2_pwd', 'm_rpdb2_pwd']:
continue
s += repr_ltd(i, length - len(s), encoding, is_valid)
index += 1
if index < len(l) and len(s) > length:
is_valid[0] = False
if not s.endswith('...'):
s += '...'
break
if index < len(l) or (index == 1 and pattern[0] == '('):
s += ', '
except AttributeError:
is_valid[0] = False
return as_unicode(pattern % s)
def repr_dict(pattern, d, length, encoding, is_valid):
length = max(0, length - len(pattern) + 2)
s = ''
index = 0
try:
for k in d:
#
# Remove any trace of session password from data structures that
# go over the network.
#
if type(k) == str and k in ['_rpdb2_args', '_rpdb2_pwd', 'm_rpdb2_pwd']:
continue
v = d[k]
s += repr_ltd(k, length - len(s), encoding, is_valid)
if len(s) > length:
is_valid[0] = False
if not s.endswith('...'):
s += '...'
break
s += ': ' + repr_ltd(v, length - len(s), encoding, is_valid)
index += 1
if index < len(d) and len(s) > length:
is_valid[0] = False
if not s.endswith('...'):
s += '...'
break
if index < len(d):
s += ', '
except AttributeError:
is_valid[0] = False
return as_unicode(pattern % s)
def repr_bytearray(s, length, encoding, is_valid):
try:
s = s.decode(encoding)
r = repr_unicode(s, length, is_valid)
return 'bytearray(b' + r[1:] + ')'
except:
#
        # If the string cannot be decoded with the given encoding, its
        # repr() will be done with the regular repr() function.
#
return repr_str_raw(s, length, is_valid)
def repr_bytes(s, length, encoding, is_valid):
try:
s = s.decode(encoding)
r = repr_unicode(s, length, is_valid)
return 'b' + r[1:]
except:
#
        # If the string cannot be decoded with the given encoding, its
        # repr() will be done with the regular repr() function.
#
return repr_str_raw(s, length, is_valid)
def repr_str8(s, length, encoding, is_valid):
try:
s = s.decode(encoding)
r = repr_unicode(s, length, is_valid)
return 's' + r[1:]
except:
#
        # If the string cannot be decoded with the given encoding, its
        # repr() will be done with the regular repr() function.
#
return repr_str_raw(s, length, is_valid)
def repr_str(s, length, encoding, is_valid):
try:
s = as_unicode(s, encoding, fstrict = True)
r = repr_unicode(s, length, is_valid)
return r[1:]
except:
#
        # If the string cannot be decoded with the given encoding, its
        # repr() will be done with the regular repr() function.
#
return repr_str_raw(s, length, is_valid)
def repr_unicode(s, length, is_valid):
index = [2, 1][is_py3k()]
rs = ''
for c in s:
if len(rs) > length:
is_valid[0] = False
rs += '...'
break
if ord(c) < 128:
rs += repr(c)[index: -1]
else:
rs += c
if not "'" in rs:
return as_unicode("u'%s'" % rs)
if not '"' in rs:
return as_unicode('u"%s"' % rs)
return as_unicode("u'%s'" % rs.replace("'", "\\'"))
def repr_str_raw(s, length, is_valid):
if is_unicode(s):
eli = ELLIPSIS_UNICODE
else:
eli = ELLIPSIS_BYTES
if len(s) > length:
is_valid[0] = False
s = s[: length] + eli
return as_unicode(repr(s))
def repr_base(v, length, is_valid):
r = repr(v)
if len(r) > length:
is_valid[0] = False
r = r[: length] + '...'
return as_unicode(r)
def repr_ltd(x, length, encoding, is_valid = [True]):
try:
length = max(0, length)
try:
if isinstance(x, frozenset):
return repr_list('frozenset([%s])', x, length, encoding, is_valid)
if isinstance(x, set):
return repr_list('set([%s])', x, length, encoding, is_valid)
except NameError:
pass
if isinstance(x, sets.Set):
return repr_list('sets.Set([%s])', x, length, encoding, is_valid)
if isinstance(x, sets.ImmutableSet):
return repr_list('sets.ImmutableSet([%s])', x, length, encoding, is_valid)
if isinstance(x, list):
return repr_list('[%s]', x, length, encoding, is_valid)
if isinstance(x, tuple):
return repr_list('(%s)', x, length, encoding, is_valid)
if isinstance(x, dict):
return repr_dict('{%s}', x, length, encoding, is_valid)
if encoding == ENCODING_RAW_I and [True for t in [str, unicode, bytearray, bytes, str8] if t is type(x)]:
return repr_str_raw(x, length, is_valid)
if type(x) is unicode:
return repr_unicode(x, length, is_valid)
if type(x) is bytearray:
return repr_bytearray(x, length, encoding, is_valid)
if type(x) is bytes:
return repr_bytes(x, length, encoding, is_valid)
if type(x) is str8:
return repr_str8(x, length, encoding, is_valid)
if type(x) is str:
return repr_str(x, length, encoding, is_valid)
if [True for t in [bool, int, float, long, type(None)] if t is type(x)]:
return repr_base(x, length, is_valid)
is_valid[0] = False
y = safe_repr(x)[: length]
if len(y) == length:
y += '...'
if encoding == ENCODING_RAW_I:
encoding = 'utf-8'
try:
y = as_unicode(y, encoding, fstrict = True)
return y
except:
pass
encoding = sys.getfilesystemencoding()
y = as_unicode(y, encoding)
return y
except:
print_debug_exception()
return as_unicode('N/A')
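#
# Illustrative sketch, not part of the debugger: repr_ltd() clips the repr
# of arbitrary objects to a length budget. The is_valid argument acts as an
# out-parameter: its first element is set to False when the output had to
# be truncated. The function below is hypothetical and never called.
#
def _example_repr_ltd_sketch():
    is_valid = [True]
    r = repr_ltd(list(range(1000)), 32, ENCODING_RAW, is_valid)
    #
    # r is now a clipped unicode repr such as '[0, 1, 2, ...]' and
    # is_valid[0] is False, since the full list did not fit.
    #
    return (r, is_valid[0])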
def print_debug(_str):
if not g_fDebug:
return
t = time.time()
l = time.localtime(t)
s = time.strftime('%H:%M:%S', l) + '.%03d' % ((t - int(t)) * 1000)
f = sys._getframe(1)
filename = os.path.basename(f.f_code.co_filename)
lineno = f.f_lineno
name = f.f_code.co_name
str = '%s %s:%d in %s: %s' % (s, filename, lineno, name, _str)
_print(str, sys.__stderr__)
def print_debug_exception(fForce = False):
"""
    Print exceptions to stderr when in debug mode.
"""
if not g_fDebug and not fForce:
return
(t, v, tb) = sys.exc_info()
print_exception(t, v, tb, fForce)
class CFileWrapper:
def __init__(self, f):
self.m_f = f
def write(self, s):
_print(s, self.m_f, feol = False)
def __getattr__(self, name):
return self.m_f.__getattr__(name)
def print_exception(t, v, tb, fForce = False):
"""
Print exceptions to stderr when in debug mode.
"""
if not g_fDebug and not fForce:
return
try:
g_traceback_lock.acquire()
traceback.print_exception(t, v, tb, file = CFileWrapper(sys.stderr))
finally:
g_traceback_lock.release()
def print_stack():
"""
    Print the stack to stderr when in debug mode.
"""
if g_fDebug == True:
try:
g_traceback_lock.acquire()
traceback.print_stack(file = CFileWrapper(sys.stderr))
finally:
g_traceback_lock.release()
#
# myisfile() is similar to os.path.isfile() but also works with
# Python eggs.
#
def myisfile(path):
try:
mygetfile(path, False)
return True
except:
return False
#
# Read a file even if inside a Python egg.
#
def mygetfile(path, fread_file = True):
if os.path.isfile(path):
if not fread_file:
return
if sys.platform == 'OpenVMS':
#
# OpenVMS filesystem does not support byte stream.
#
mode = 'r'
else:
mode = 'rb'
f = open(path, mode)
data = f.read()
f.close()
return data
d = os.path.dirname(path)
while True:
if os.path.exists(d):
break
_d = os.path.dirname(d)
if _d in [d, '']:
raise IOError
d = _d
if not zipfile.is_zipfile(d):
raise IOError
z = zipimport.zipimporter(d)
try:
data = z.get_data(path[len(d) + 1:])
return data
except:
raise IOError
def split_command_line_path_filename_args(command_line):
"""
    Split a command line into a 3-element tuple: (path, filename, args)
"""
command_line = command_line.strip()
if len(command_line) == 0:
return ('', '', '')
if myisfile(command_line):
(_path, _filename) = split_path(command_line)
return (_path, _filename, '')
if command_line[0] in ['"', "'"]:
_command_line = command_line[1:]
i = _command_line.find(command_line[0])
if i == -1:
(_path, filename) = split_path(_command_line)
return (_path, filename, '')
else:
(_path, filename) = split_path(_command_line[: i])
args = _command_line[i + 1:].strip()
return (_path, filename, args)
else:
i = command_line.find(' ')
if i == -1:
(_path, filename) = split_path(command_line)
return (_path, filename, '')
else:
args = command_line[i + 1:].strip()
(_path, filename) = split_path(command_line[: i])
return (_path, filename, args)
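#
# Illustrative sketch, not part of the debugger: behavior of
# split_command_line_path_filename_args() on a quoted command line, on a
# POSIX system. The function below is hypothetical and never called.
#
def _example_split_command_line_sketch():
    (_path, filename, args) = split_command_line_path_filename_args(
        '"/tmp/my script.py" -v 1')
    #
    # _path == '/tmp/', filename == 'my script.py', args == '-v 1'
    #
    return (_path, filename, args)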
def split_path(path):
(_path, filename) = os.path.split(path)
#
    # Make sure a path separator (e.g. '/') ends the split path if one
    # was present in the original path.
#
if (_path[-1:] not in [os.path.sep, os.path.altsep]) and \
(path[len(_path): len(_path) + 1] in [os.path.sep, os.path.altsep]):
_path = _path + path[len(_path): len(_path) + 1]
return (_path, filename)
def my_os_path_join(dirname, basename):
if is_py3k() or (type(dirname) == str and type(basename) == str):
return os.path.join(dirname, basename)
encoding = sys.getfilesystemencoding()
if type(dirname) == str:
dirname = dirname.decode(encoding)
if type(basename) == str:
basename = basename.decode(encoding)
return os.path.join(dirname, basename)
def calc_frame_path(frame):
globals_filename = frame.f_globals.get('__file__', None)
filename = frame.f_code.co_filename
if filename.startswith('<'):
if globals_filename == None:
return filename
else:
filename = CalcScriptName(os.path.basename(globals_filename))
if filename in g_frames_path:
return g_frames_path[filename]
if globals_filename != None:
dirname = os.path.dirname(globals_filename)
basename = os.path.basename(filename)
path = my_os_path_join(dirname, basename)
if os.path.isabs(path):
abspath = my_abspath(path)
lowered = winlower(abspath)
g_frames_path[filename] = lowered
return lowered
try:
abspath = FindFile(path, fModules = True)
lowered = winlower(abspath)
g_frames_path[filename] = lowered
return lowered
except IOError:
pass
if os.path.isabs(filename):
abspath = my_abspath(filename)
lowered = winlower(abspath)
g_frames_path[filename] = lowered
return lowered
try:
abspath = FindFile(filename, fModules = True)
lowered = winlower(abspath)
g_frames_path[filename] = lowered
return lowered
except IOError:
lowered = winlower(filename)
return lowered
def my_abspath(path):
"""
We need our own little version of os.path.abspath since the original
code imports modules in the 'nt' code path which can cause our debugger
to deadlock in unexpected locations.
"""
if path[:1] == '<':
#
# 'path' may also be '<stdin>' in which case it is left untouched.
#
return path
if os.name == 'nt':
return my_abspath1(path)
return os.path.abspath(path)
#
# MOD
#
def my_abspath1(path):
"""
Modification of ntpath.abspath() that avoids doing an import.
"""
if path:
try:
path = _getfullpathname(path)
except WindowsError:
pass
else:
try:
path = getcwd()
except UnicodeDecodeError:
#
# This exception can be raised in py3k (alpha) on nt.
#
path = getcwdu()
np = os.path.normpath(path)
if (len(np) >= 2) and (np[1:2] == ':'):
np = np[:1].upper() + np[1:]
return np
def IsPythonSourceFile(path):
if path.endswith(PYTHON_FILE_EXTENSION):
return True
if path.endswith(PYTHONW_FILE_EXTENSION):
return True
path = g_found_unicode_files.get(path, path)
for lineno in range(1, 10):
line = get_source_line(path, lineno)
if line.startswith('#!') and 'python' in line:
return True
if is_py3k():
#
# py3k does not have compiler.parseFile, so return
# True anyway...
#
return True
try:
compiler.parseFile(path)
return True
except:
return False
def CalcModuleName(filename):
_basename = os.path.basename(filename)
(modulename, ext) = os.path.splitext(_basename)
if ext in PYTHON_EXT_LIST:
return modulename
return _basename
def CalcScriptName(filename, fAllowAnyExt = True):
if filename.endswith(PYTHON_FILE_EXTENSION):
return filename
if filename.endswith(PYTHONW_FILE_EXTENSION):
return filename
if filename.endswith(PYTHONW_SO_EXTENSION):
scriptname = filename[:-3] + PYTHON_FILE_EXTENSION
return scriptname
if filename[:-1].endswith(PYTHON_FILE_EXTENSION):
scriptname = filename[:-1]
return scriptname
if fAllowAnyExt:
return filename
scriptname = filename + PYTHON_FILE_EXTENSION
return scriptname
def FindModuleDir(module_name):
if module_name == '':
raise IOError
dot_index = module_name.rfind('.')
if dot_index != -1:
parent = module_name[: dot_index]
child = module_name[dot_index + 1:]
else:
parent = ''
child = module_name
m = sys.modules[module_name]
if not hasattr(m, '__file__') or m.__file__ == None:
parent_dir = FindModuleDir(parent)
module_dir = my_os_path_join(parent_dir, winlower(child))
return module_dir
if not os.path.isabs(m.__file__):
parent_dir = FindModuleDir(parent)
module_dir = my_os_path_join(parent_dir, winlower(child))
return module_dir
(root, ext) = os.path.splitext(m.__file__)
if root.endswith('__init__'):
root = os.path.dirname(root)
abspath = my_abspath(root)
lowered = winlower(abspath)
return lowered
def FindFileAsModule(filename):
lowered = winlower(filename)
(root, ext) = os.path.splitext(lowered)
root_dotted = root.replace('\\', '.').replace('/', '.').replace(':', '.')
match_list = []
for (module_name, m) in list(sys.modules.items()):
lowered_module_name = winlower(module_name)
if (root_dotted + '.').startswith(lowered_module_name + '.'):
match_list.append((len(module_name), module_name))
if lowered_module_name == root_dotted:
break
match_list.sort()
match_list.reverse()
for (matched_len, matched_module) in match_list:
try:
module_dir = FindModuleDir(matched_module)
except IOError:
continue
suffix = root[matched_len:]
if suffix == '':
path = module_dir + ext
else:
path = my_os_path_join(module_dir, suffix.strip('\\')) + ext
scriptname = CalcScriptName(path, fAllowAnyExt = False)
if myisfile(scriptname):
return scriptname
#
# Check .pyw files
#
scriptname += 'w'
if scriptname.endswith(PYTHONW_FILE_EXTENSION) and myisfile(scriptname):
return scriptname
raise IOError
def getcwd():
try:
return os.getcwd()
except UnicodeDecodeError:
print_debug_exception(True)
raise
def getcwdu():
if hasattr(os, 'getcwdu'):
return os.getcwdu()
return getcwd()
def FindFile(
filename,
sources_paths = [],
fModules = False,
fAllowAnyExt = True
):
"""
    FindFile looks for the full path of a script in a rather non-strict
    and human-like manner.
    ENCODING:
    filename should be either Unicode or encoded with sys.getfilesystemencoding()!
    The returned value is encoded with sys.getfilesystemencoding().
    It will always look for .py or .pyw files even if a .pyc or no
    extension is given.
    The search order is:
    1. Loaded modules (only if fModules is set).
    2. The full path (if it exists).
    3. sources_paths.
    4. The current path.
    5. PYTHONPATH.
    6. PATH.
"""
if filename in g_found_unicode_files:
return filename
if filename.startswith('<'):
raise IOError
filename = filename.strip('\'"')
filename = os.path.expanduser(filename)
if fModules and not (os.path.isabs(filename) or filename.startswith('.')):
try:
return winlower(FindFileAsModule(filename))
except IOError:
pass
if fAllowAnyExt:
try:
abspath = FindFile(
filename,
sources_paths,
fModules = False,
fAllowAnyExt = False
)
return abspath
except IOError:
pass
if os.path.isabs(filename) or filename.startswith('.'):
try:
scriptname = None
abspath = my_abspath(filename)
lowered = winlower(abspath)
scriptname = CalcScriptName(lowered, fAllowAnyExt)
if myisfile(scriptname):
return scriptname
#
# Check .pyw files
#
scriptname += 'w'
if scriptname.endswith(PYTHONW_FILE_EXTENSION) and myisfile(scriptname):
return scriptname
scriptname = None
raise IOError
finally:
if not is_py3k() and is_unicode(scriptname):
fse = sys.getfilesystemencoding()
_l = as_string(scriptname, fse)
if '?' in _l:
g_found_unicode_files[_l] = scriptname
return _l
scriptname = CalcScriptName(filename, fAllowAnyExt)
try:
cwd = [getcwd(), getcwdu()]
except UnicodeDecodeError:
#
# This exception can be raised in py3k (alpha) on nt.
#
cwd = [getcwdu()]
env_path = os.environ['PATH']
paths = sources_paths + cwd + g_initial_cwd + sys.path + env_path.split(os.pathsep)
try:
lowered = None
for p in paths:
f = my_os_path_join(p, scriptname)
abspath = my_abspath(f)
lowered = winlower(abspath)
if myisfile(lowered):
return lowered
#
# Check .pyw files
#
lowered += 'w'
if lowered.endswith(PYTHONW_FILE_EXTENSION) and myisfile(lowered):
return lowered
lowered = None
raise IOError
finally:
if not is_py3k() and is_unicode(lowered):
fse = sys.getfilesystemencoding()
_l = as_string(lowered, fse)
if '?' in _l:
g_found_unicode_files[_l] = lowered
return _l
def IsFileInPath(filename):
if filename == '':
return False
try:
FindFile(filename)
return True
except IOError:
return False
def IsPrefixInEnviron(_str):
for e in os.environ.keys():
if e.startswith(_str):
return True
return False
def CalcTerminalCommand():
"""
Calc the unix command to start a new terminal, for example: xterm
"""
if RPDBTERM in os.environ:
term = os.environ[RPDBTERM]
if IsFileInPath(term):
return term
if COLORTERM in os.environ:
term = os.environ[COLORTERM]
if IsFileInPath(term):
return term
if IsPrefixInEnviron(KDE_PREFIX):
(s, term) = commands.getstatusoutput(KDE_DEFAULT_TERM_QUERY)
if (s == 0) and IsFileInPath(term):
return term
elif IsPrefixInEnviron(GNOME_PREFIX):
if IsFileInPath(GNOME_DEFAULT_TERM):
return GNOME_DEFAULT_TERM
if IsFileInPath(XTERM):
return XTERM
if IsFileInPath(RXVT):
return RXVT
raise SpawnUnsupported
def CalcMacTerminalCommand(command):
"""
    Calculate what to put in popen to start a given script.
    Builds a tiny AppleScript snippet that performs the script action.
"""
#
# Quoting is a bit tricky; we do it step by step.
# Make Applescript string: put backslashes before double quotes and
# backslashes.
#
command = command.replace('\\', '\\\\').replace('"', '\\"')
#
# Make complete Applescript command.
#
command = 'tell application "Terminal" to do script "%s"' % command
#
# Make a shell single quoted string (put backslashed single quotes
# outside string).
#
command = command.replace("'", "'\\''")
#
# Make complete shell command.
#
return "osascript -e '%s'" % command
def winlower(path):
"""
    Return the lowercase version of 'path' on NT systems.
    On NT, filenames are case-insensitive, so paths are lowered
    for comparison purposes.
"""
if os.name == 'nt':
return path.lower()
return path
def source_provider_blender(filename):
"""
    Return the source code of the file referred to by filename.
Support for debugging of Blender Python scripts.
Blender scripts are not always saved on disk, and their
source has to be queried directly from the Blender API.
http://www.blender.org
"""
if not 'Blender.Text' in sys.modules:
raise IOError
if filename.startswith('<'):
#
        # This filename refers to a Blender script whose source is not
        # available.
#
raise IOError(BLENDER_SOURCE_NOT_AVAILABLE)
_filename = os.path.basename(filename)
try:
t = sys.modules['Blender.Text'].get(_filename)
lines = t.asLines()
return '\n'.join(lines) + '\n'
except NameError:
f = winlower(_filename)
tlist = sys.modules['Blender.Text'].get()
t = None
for _t in tlist:
n = winlower(_t.getName())
if n == f:
t = _t
break
if t == None:
#
            # filename does not specify a Blender file. Raise IOError
            # so that the search can continue on the file system.
#
raise IOError
lines = t.asLines()
return '\n'.join(lines) + '\n'
def source_provider_filesystem(filename):
l = mygetfile(filename)
if l[:3] == as_bytes(ENCODING_UTF8_PREFIX_1):
l = l[3:]
return l
def source_provider(filename):
source = None
ffilesystem = False
try:
if g_source_provider_aux != None:
source = g_source_provider_aux(filename)
except IOError:
v = sys.exc_info()[1]
if SOURCE_NOT_AVAILABLE in v.args:
raise
try:
if source == None:
source = source_provider_blender(filename)
except IOError:
v = sys.exc_info()[1]
if BLENDER_SOURCE_NOT_AVAILABLE in v.args:
raise
if source == None:
source = source_provider_filesystem(filename)
ffilesystem = True
encoding = ParseEncoding(source)
if not is_unicode(source):
source = as_unicode(source, encoding)
return source, encoding, ffilesystem
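#
# Illustrative sketch, not part of the debugger: an embedding application
# can point g_source_provider_aux at its own source store. Per the logic in
# source_provider() above, the provider should return the source text for
# filenames it recognizes and raise a plain IOError otherwise, so that the
# lookup can fall through to the Blender and file-system providers. The
# function and its in-memory store below are hypothetical.
#
def _example_install_source_provider():
    _sources = {'<generated>': 'print("hello")\n'}
    def _provider(filename):
        if filename not in _sources:
            raise IOError
        return _sources[filename]
    global g_source_provider_aux
    g_source_provider_aux = _provider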
def lines_cache(filename):
filename = g_found_unicode_files.get(filename, filename)
if filename in g_lines_cache:
return g_lines_cache[filename]
(source, encoding, ffilesystem) = source_provider(filename)
source = source.replace(as_unicode('\r\n'), as_unicode('\n'))
lines = source.split(as_unicode('\n'))
g_lines_cache[filename] = (lines, encoding, ffilesystem)
return (lines, encoding, ffilesystem)
def get_source(filename):
(lines, encoding, ffilesystem) = lines_cache(filename)
source = as_unicode('\n').join(lines)
return (source, encoding)
def get_source_line(filename, lineno):
(lines, encoding, ffilesystem) = lines_cache(filename)
if lineno > len(lines):
return as_unicode('')
return lines[lineno - 1] + as_unicode('\n')
def is_provider_filesystem(filename):
try:
(lines, encoding, ffilesystem) = lines_cache(filename)
return ffilesystem
except IOError:
v = sys.exc_info()[1]
return not (BLENDER_SOURCE_NOT_AVAILABLE in v.args or SOURCE_NOT_AVAILABLE in v.args)
def get_file_encoding(filename):
(lines, encoding, ffilesystem) = lines_cache(filename)
return encoding
def ParseLineEncoding(l):
if l.startswith('# -*- coding: '):
e = l[len('# -*- coding: '):].split()[0]
return e
if l.startswith('# vim:fileencoding='):
e = l[len('# vim:fileencoding='):].strip()
return e
return None
def ParseEncoding(txt):
"""
Parse document encoding according to:
http://docs.python.org/ref/encodings.html
"""
eol = '\n'
if not is_unicode(txt):
eol = as_bytes('\n')
l = txt.split(eol, 20)[:-1]
for line in l:
line = as_unicode(line)
encoding = ParseLineEncoding(line)
if encoding is not None:
try:
codecs.lookup(encoding)
return encoding
except:
return 'utf-8'
return 'utf-8'
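#
# Illustrative sketch, not part of the debugger: ParseEncoding() only
# honors declarations found in the first 20 lines that codecs can resolve;
# anything else falls back to utf-8. The function below is hypothetical
# and never called.
#
def _example_parse_encoding_sketch():
    assert ParseEncoding('# -*- coding: latin-1 -*-\nx = 1\n') == 'latin-1'
    assert ParseEncoding('# vim:fileencoding=cp1255\nx = 1\n') == 'cp1255'
    assert ParseEncoding('x = 1\n') == 'utf-8'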
def _getpid():
try:
return os.getpid()
except:
return -1
def calcURL(host, port):
"""
Form HTTP URL from 'host' and 'port' arguments.
"""
url = "http://" + str(host) + ":" + str(port)
return url
def GetSocketError(e):
if (not isinstance(e.args, tuple)) or (len(e.args) == 0):
return -1
return e.args[0]
def ControlRate(t_last_call, max_rate):
"""
Limits rate at which this function is called by sleeping.
Returns the time of invocation.
"""
p = 1.0 / max_rate
t_current = time.time()
dt = t_current - t_last_call
if dt < p:
time.sleep(p - dt)
return t_current
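#
# Illustrative sketch, not part of the debugger: using ControlRate() to cap
# a polling loop at roughly 10 iterations per second. The function below is
# hypothetical and never called.
#
def _example_control_rate_sketch():
    t_last = 0
    for i in range(5):
        #
        # Sleeps just enough to keep consecutive iterations at least
        # 1.0 / 10 seconds apart, then returns the invocation time.
        #
        t_last = ControlRate(t_last, 10)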
def generate_rid():
"""
    Return a random 7-digit id.
"""
rid = repr(random.randint(1000000, 9999999))
rid = as_unicode(rid)
return rid
def generate_random_char(_str):
"""
Return a random character from string argument.
"""
if _str == '':
return ''
i = random.randint(0, len(_str) - 1)
return _str[i]
def generate_random_password():
"""
    Generate an 8-character password.
"""
s = 'abdefghijmnqrt' + 'ABDEFGHJLMNQRTY'
ds = '23456789_' + s
_rpdb2_pwd = generate_random_char(s)
for i in range(0, 7):
_rpdb2_pwd += generate_random_char(ds)
_rpdb2_pwd = as_unicode(_rpdb2_pwd)
return _rpdb2_pwd
def is_valid_pwd(_rpdb2_pwd):
if _rpdb2_pwd in [None, '']:
return False
try:
if not is_unicode(_rpdb2_pwd):
_rpdb2_pwd = _rpdb2_pwd.decode('ascii')
_rpdb2_pwd.encode('ascii')
except:
return False
for c in _rpdb2_pwd:
if c.isalnum():
continue
if c == '_':
continue
return False
return True
def is_encryption_supported():
"""
Is the Crypto module imported/available.
"""
return 'DES' in globals()
def calc_suffix(_str, n):
"""
    Return an n-character suffix of the argument string, of the form
    '...suffix'.
"""
if len(_str) <= n:
return _str
return '...' + _str[-(n - 3):]
def calc_prefix(_str, n):
"""
    Return an n-character prefix of the argument string, of the form
    'prefix...'.
"""
if len(_str) <= n:
return _str
return _str[: (n - 3)] + '...'
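#
# Illustrative sketch, not part of the debugger: clipping behavior of
# calc_suffix() and calc_prefix(). The function below is hypothetical and
# never called.
#
def _example_clip_sketch():
    assert calc_suffix('/very/long/path/to/script.py', 15) == '...to/script.py'
    assert calc_prefix('averylongidentifier', 10) == 'averylo...'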
def create_rpdb_settings_folder():
"""
Create the settings folder on Posix systems:
'~/.rpdb2_settings' with mode 700.
"""
if os.name != POSIX:
return
home = os.path.expanduser('~')
rsf = os.path.join(home, RPDB_SETTINGS_FOLDER)
if not os.path.exists(rsf):
os.mkdir(rsf, int('0700', 8))
pwds = os.path.join(home, RPDB_PWD_FOLDER)
if not os.path.exists(pwds):
os.mkdir(pwds, int('0700', 8))
bpl = os.path.join(home, RPDB_BPL_FOLDER)
if not os.path.exists(bpl):
os.mkdir(bpl, int('0700', 8))
def cleanup_bpl_folder(path):
if random.randint(0, 10) > 0:
return
l = os.listdir(path)
if len(l) < MAX_BPL_FILES:
return
try:
ll = [(os.stat(os.path.join(path, f))[stat.ST_ATIME], f) for f in l]
except:
return
ll.sort()
for (t, f) in ll[: -MAX_BPL_FILES]:
try:
os.remove(os.path.join(path, f))
except:
pass
def calc_bpl_filename(filename):
key = as_bytes(filename)
tmp_filename = hmac.new(key).hexdigest()[:10]
if os.name == POSIX:
home = os.path.expanduser('~')
bpldir = os.path.join(home, RPDB_BPL_FOLDER)
cleanup_bpl_folder(bpldir)
path = os.path.join(bpldir, tmp_filename) + BREAKPOINTS_FILE_EXT
return path
#
# gettempdir() is used since it works with unicode user names on
# Windows.
#
tmpdir = tempfile.gettempdir()
bpldir = os.path.join(tmpdir, RPDB_BPL_FOLDER_NT)
if not os.path.exists(bpldir):
#
# Folder creation is done here since this is a temp folder.
#
try:
os.mkdir(bpldir, int('0700', 8))
except:
print_debug_exception()
raise CException
else:
cleanup_bpl_folder(bpldir)
path = os.path.join(bpldir, tmp_filename) + BREAKPOINTS_FILE_EXT
return path
def calc_pwd_file_path(rid):
"""
Calc password file path for Posix systems:
'~/.rpdb2_settings/<rid>'
"""
home = os.path.expanduser('~')
rsf = os.path.join(home, RPDB_PWD_FOLDER)
pwd_file_path = os.path.join(rsf, rid)
return pwd_file_path
def create_pwd_file(rid, _rpdb2_pwd):
"""
Create password file for Posix systems.
"""
if os.name != POSIX:
return
path = calc_pwd_file_path(rid)
fd = os.open(path, os.O_WRONLY | os.O_CREAT, int('0600', 8))
os.write(fd, as_bytes(_rpdb2_pwd))
os.close(fd)
def read_pwd_file(rid):
"""
Read password from password file for Posix systems.
"""
assert(os.name == POSIX)
path = calc_pwd_file_path(rid)
p = open(path, 'r')
_rpdb2_pwd = p.read()
p.close()
_rpdb2_pwd = as_unicode(_rpdb2_pwd, fstrict = True)
return _rpdb2_pwd
def delete_pwd_file(rid):
"""
Delete password file for Posix systems.
"""
if os.name != POSIX:
return
path = calc_pwd_file_path(rid)
try:
os.remove(path)
except:
pass
def CalcUserShell():
try:
s = os.getenv('SHELL')
if s != None:
return s
import getpass
username = getpass.getuser()
f = open('/etc/passwd', 'r')
l = f.read()
f.close()
ll = l.split('\n')
d = dict([(e.split(':', 1)[0], e.split(':')[-1]) for e in ll])
return d[username]
except:
return 'sh'
def IsFilteredAttribute(a):
if not (a.startswith('__') and a.endswith('__')):
return False
if a in ['__class__', '__bases__', '__file__', '__doc__', '__name__', '__all__', '__builtins__']:
return False
return True
def IsFilteredAttribute2(r, a):
try:
o = getattr(r, a)
r = parse_type(type(o))
if 'function' in r or 'method' in r or r == 'type':
return True
return False
except:
return False
def CalcFilteredDir(r, filter_level):
d = dir(r)
if 'finfo' in d and parse_type(type(r)) == 'mp_request':
#
# Workaround mod_python segfault in type(req.finfo) by
# removing this attribute from the namespace viewer.
#
d.remove('finfo')
if filter_level == 0:
return d
fd = [a for a in d if not IsFilteredAttribute(a)]
return fd
def CalcIdentity(r, filter_level):
if filter_level == 0:
return r
if not hasattr(r, 'im_func'):
return r
return r.im_func
def getattr_nothrow(o, a):
try:
return getattr(o, a)
except AttributeError:
return ERROR_NO_ATTRIBUTE
except:
print_debug_exception()
return ERROR_NO_ATTRIBUTE
def calc_attribute_list(r, filter_level):
d = CalcFilteredDir(r, filter_level)
rs = set(d)
c = getattr_nothrow(r, '__class__')
if not c is ERROR_NO_ATTRIBUTE:
d = CalcFilteredDir(c, False)
cs = set(d)
s = rs & cs
for e in s:
o1 = getattr_nothrow(r, e)
o2 = getattr_nothrow(c, e)
if o1 is ERROR_NO_ATTRIBUTE or CalcIdentity(o1, filter_level) is CalcIdentity(o2, filter_level):
rs.discard(e)
try:
if filter_level == 1 and getattr(o1, '__self__') is getattr(o2, '__self__'):
rs.discard(e)
except:
pass
bl = getattr_nothrow(r, '__bases__')
if type(bl) == tuple:
for b in bl:
d = CalcFilteredDir(b, False)
bs = set(d)
s = rs & bs
for e in s:
o1 = getattr_nothrow(r, e)
o2 = getattr_nothrow(b, e)
if o1 is ERROR_NO_ATTRIBUTE or CalcIdentity(o1, filter_level) is CalcIdentity(o2, filter_level):
rs.discard(e)
try:
if filter_level == 1 and getattr(o1, '__self__') is getattr(o2, '__self__'):
rs.discard(e)
except:
pass
l = [a for a in rs if (filter_level < 2 or not IsFilteredAttribute2(r, a))]
if hasattr(r, '__class__') and not '__class__' in l:
l = ['__class__'] + l
if hasattr(r, '__bases__') and not '__bases__' in l:
l = ['__bases__'] + l
al = [a for a in l if hasattr(r, a)]
return al
class _RPDB2_FindRepr:
def __init__(self, o, repr_limit):
self.m_object = o
self.m_repr_limit = repr_limit
def __getitem__(self, key):
index = 0
for i in self.m_object:
if repr_ltd(i, self.m_repr_limit, encoding = ENCODING_RAW_I).replace('"', '"') == key:
if isinstance(self.m_object, dict):
return self.m_object[i]
return i
index += 1
if index > MAX_SORTABLE_LENGTH:
return None
def __setitem__(self, key, value):
if not isinstance(self.m_object, dict):
return
index = 0
for i in self.m_object:
if repr_ltd(i, self.m_repr_limit, encoding = ENCODING_RAW_I).replace('"', '"') == key:
self.m_object[i] = value
return
index += 1
if index > MAX_SORTABLE_LENGTH:
return
#
# Since on Python 3000 the comparison of different types raises exceptions and
# the __cmp__ method was removed, sorting of namespace items is based on
# lexicographic order, except for numbers, which are sorted normally and appear
# before all other types.
#
def sort(s):
if sys.version_info[:2] == (2, 3):
#
# On Python 2.3 the key parameter is not supported.
#
s.sort(sort_cmp)
return
s.sort(key = sort_key)
def sort_key(e):
if is_py3k() and isinstance(e, numbers.Number):
return (0, e)
if not is_py3k() and operator.isNumberType(e):
return (0, e)
return (1, repr_ltd(e, 256, encoding = ENCODING_RAW_I))
def sort_cmp(x, y):
skx = sort_key(x)
sky = sort_key(y)
return cmp(skx, sky)
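#
# Illustrative sketch, not part of the debugger: sort() places numbers
# first, in numeric order, and everything else after them, ordered by the
# repr of each item. The function below is hypothetical and never called.
#
def _example_sort_sketch():
    l = ['b', 10, 'a', 2]
    sort(l)
    #
    # l is now [2, 10, 'a', 'b'].
    #
    return l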
def recalc_sys_path(old_pythonpath):
opl = old_pythonpath.split(os.path.pathsep)
del sys.path[1: 1 + len(opl)]
pythonpath = os.environ.get('PYTHONPATH', '')
ppl = pythonpath.split(os.path.pathsep)
for i, p in enumerate(ppl):
abspath = my_abspath(p)
lowered = winlower(abspath)
sys.path.insert(1 + i, lowered)
def calc_signame(signum):
for k, v in vars(signal).items():
if not k.startswith('SIG') or k in ['SIG_IGN', 'SIG_DFL', 'SIGRTMIN', 'SIGRTMAX']:
continue
if v == signum:
return k
return '?'
#
# Similar to traceback.extract_stack() but fixes path with calc_frame_path()
#
def my_extract_stack(f):
if f == None:
return []
try:
g_traceback_lock.acquire()
_s = traceback.extract_stack(f)
finally:
g_traceback_lock.release()
_s.reverse()
s = []
for (p, ln, fn, text) in _s:
path = as_unicode(calc_frame_path(f), sys.getfilesystemencoding())
if text == None:
text = ''
s.append((path, ln, as_unicode(fn), as_unicode(text)))
f = f.f_back
if f == None:
break
s.reverse()
return s
#
# Similar to traceback.extract_tb() but fixes path with calc_frame_path()
#
def my_extract_tb(tb):
try:
g_traceback_lock.acquire()
_s = traceback.extract_tb(tb)
finally:
g_traceback_lock.release()
s = []
for (p, ln, fn, text) in _s:
path = as_unicode(calc_frame_path(tb.tb_frame), sys.getfilesystemencoding())
if text == None:
text = ''
s.append((path, ln, as_unicode(fn), as_unicode(text)))
tb = tb.tb_next
if tb == None:
break
return s
def get_traceback(frame, ctx):
if is_py3k():
if ctx.get_exc_info() != None:
return ctx.get_exc_info()[2]
else:
if frame.f_exc_traceback != None:
return frame.f_exc_traceback
locals = copy.copy(frame.f_locals)
if not 'traceback' in locals:
return None
tb = locals['traceback']
if dir(tb) == ['tb_frame', 'tb_lasti', 'tb_lineno', 'tb_next']:
return tb
class CFirewallTest:
m_port = None
m_thread_server = None
m_thread_client = None
m_lock = threading.RLock()
def __init__(self, fremote = False, timeout = 4):
if fremote:
self.m_loopback = ''
else:
self.m_loopback = LOOPBACK
self.m_timeout = timeout
self.m_result = None
self.m_last_server_error = None
self.m_last_client_error = None
def run(self):
CFirewallTest.m_lock.acquire()
try:
#
            # If either the server or the client is still alive after a
            # timeout, it means it is blocked by a firewall. Return False.
#
server = CFirewallTest.m_thread_server
if server != None and thread_is_alive(server):
server.join(self.m_timeout * 1.5)
if thread_is_alive(server):
return False
client = CFirewallTest.m_thread_client
if client != None and thread_is_alive(client):
client.join(self.m_timeout * 1.5)
if thread_is_alive(client):
return False
CFirewallTest.m_port = None
self.m_result = None
t0 = time.time()
server = threading.Thread(target = self.__server)
server.start()
CFirewallTest.m_thread_server = server
#
# If server exited or failed to setup after a timeout
# it means it was blocked by a firewall.
#
while CFirewallTest.m_port == None and thread_is_alive(server):
if time.time() - t0 > self.m_timeout * 1.5:
return False
time.sleep(0.1)
if not thread_is_alive(server):
return False
t0 = time.time()
client = threading.Thread(target = self.__client)
client.start()
CFirewallTest.m_thread_client = client
while self.m_result == None and thread_is_alive(client):
if time.time() - t0 > self.m_timeout * 1.5:
return False
time.sleep(0.1)
return self.m_result
finally:
CFirewallTest.m_lock.release()
def __client(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.m_timeout)
try:
try:
s.connect((LOOPBACK, CFirewallTest.m_port))
s.send(as_bytes('Hello, world'))
data = self.__recv(s, 1024)
self.m_result = True
except socket.error:
e = sys.exc_info()[1]
self.m_last_client_error = e
self.m_result = False
finally:
s.close()
def __server(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.m_timeout)
if os.name == POSIX:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = SERVER_PORT_RANGE_START
while True:
try:
s.bind((self.m_loopback, port))
break
except socket.error:
e = sys.exc_info()[1]
if self.__GetSocketError(e) != errno.EADDRINUSE:
self.m_last_server_error = e
s.close()
return
if port >= SERVER_PORT_RANGE_START + SERVER_PORT_RANGE_LENGTH - 1:
self.m_last_server_error = e
s.close()
return
port += 1
CFirewallTest.m_port = port
try:
try:
conn = None
s.listen(1)
conn, addr = s.accept()
while True:
data = self.__recv(conn, 1024)
if not data:
return
conn.send(data)
except socket.error:
e = sys.exc_info()[1]
self.m_last_server_error = e
finally:
if conn != None:
conn.close()
s.close()
def __recv(self, s, len):
t0 = time.time()
while True:
try:
data = s.recv(1024)
return data
except socket.error:
e = sys.exc_info()[1]
if self.__GetSocketError(e) != errno.EWOULDBLOCK:
print_debug('socket error was caught, %s' % repr(e))
raise
if time.time() - t0 > self.m_timeout:
raise
continue
def __GetSocketError(self, e):
if (not isinstance(e.args, tuple)) or (len(e.args) == 0):
return -1
return e.args[0]
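#
# Illustrative sketch, not part of the debugger: driving the firewall test.
# run() returns True when the loopback echo succeeded, False when one of
# the test threads appears to be blocked, and may return None if no result
# was produced in time. The function below is hypothetical and never called.
#
def _example_firewall_test_sketch():
    test = CFirewallTest(fremote = False, timeout = 4)
    result = test.run()
    if result == False:
        print_debug('A firewall is probably blocking the debugger port.')
    return result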
#
# ---------------------------------- CThread ---------------------------------------
#
class CThread (threading.Thread):
m_fstop = False
m_threads = {}
m_lock = threading.RLock()
m_id = 0
def __init__(self, name = None, target = None, args = (), shutdown = None):
threading.Thread.__init__(self, name = name, target = target, args = args)
self.m_fstarted = False
self.m_shutdown_callback = shutdown
self.m_id = self.__getId()
def __del__(self):
#print_debug('Destructor called for ' + thread_get_name(self))
#threading.Thread.__del__(self)
if self.m_fstarted:
try:
del CThread.m_threads[self.m_id]
except KeyError:
pass
def start(self):
if CThread.m_fstop:
return
CThread.m_threads[self.m_id] = weakref.ref(self)
if CThread.m_fstop:
del CThread.m_threads[self.m_id]
return
self.m_fstarted = True
threading.Thread.start(self)
def run(self):
sys.settrace(None)
sys.setprofile(None)
threading.Thread.run(self)
def join(self, timeout = None):
try:
threading.Thread.join(self, timeout)
except AssertionError:
pass
def shutdown(self):
if self.m_shutdown_callback:
self.m_shutdown_callback()
def joinAll(cls):
print_debug('Shutting down debugger threads...')
CThread.m_fstop = True
for tid, w in list(CThread.m_threads.items()):
t = w()
if not t:
continue
try:
#print_debug('Calling shutdown of thread %s.' % thread_get_name(t))
t.shutdown()
except:
pass
t = None
t0 = time.time()
while len(CThread.m_threads) > 0:
if time.time() - t0 > SHUTDOWN_TIMEOUT:
print_debug('Shut down of debugger threads has TIMED OUT!')
return
#print_debug(repr(CThread.m_threads))
time.sleep(0.1)
print_debug('Shut down debugger threads, done.')
joinAll = classmethod(joinAll)
def clearJoin(cls):
CThread.m_fstop = False
clearJoin = classmethod(clearJoin)
def __getId(self):
CThread.m_lock.acquire()
id = CThread.m_id
CThread.m_id += 1
CThread.m_lock.release()
return id
#
#--------------------------------------- Crypto ---------------------------------------
#
class CCrypto:
"""
Handle authentication and encryption of data, using password protection.
"""
m_keys = {}
def __init__(self, _rpdb2_pwd, fAllowUnencrypted, rid):
assert(is_unicode(_rpdb2_pwd))
assert(is_unicode(rid))
self.m_rpdb2_pwd = _rpdb2_pwd
self.m_key = self.__calc_key(_rpdb2_pwd)
self.m_fAllowUnencrypted = fAllowUnencrypted
self.m_rid = rid
self.m_failure_lock = threading.RLock()
self.m_lock = threading.RLock()
self.m_index_anchor_in = random.randint(0, 1000000000)
self.m_index_anchor_ex = 0
self.m_index = 0
self.m_index_table = {}
self.m_index_table_size = INDEX_TABLE_SIZE
self.m_max_index = 0
def __calc_key(self, _rpdb2_pwd):
"""
Create and return a key from a password.
        A weak password means a weak key.
"""
if _rpdb2_pwd in CCrypto.m_keys:
return CCrypto.m_keys[_rpdb2_pwd]
key = as_bytes(_rpdb2_pwd)
suffix = key[:16]
d = hmac.new(key, digestmod = _md5)
#
# The following loop takes around a second to complete
# and should strengthen the password by ~12 bits.
        # A good password is ~30 bits strong, so we are looking
        # at a ~42 bit strong key.
#
for i in range(2 ** 12):
d.update((key + suffix) * 16)
key = d.digest()
CCrypto.m_keys[_rpdb2_pwd] = key
return key
def set_index(self, i, anchor):
try:
self.m_lock.acquire()
self.m_index = i
self.m_index_anchor_ex = anchor
finally:
self.m_lock.release()
def get_max_index(self):
return self.m_max_index
def do_crypto(self, args, fencrypt):
"""
Sign args and possibly encrypt.
Return signed/encrypted string.
"""
if not fencrypt and not self.m_fAllowUnencrypted:
raise EncryptionExpected
if fencrypt and not is_encryption_supported():
raise EncryptionNotSupported
(digest, s) = self.__sign(args)
fcompress = False
if len(s) > 50000:
_s = zlib.compress(s)
if len(_s) < len(s) * 0.4:
s = _s
fcompress = True
if fencrypt:
s = self.__encrypt(s)
s = base64.encodestring(s)
u = as_unicode(s)
return (fcompress, digest, u)
def undo_crypto(self, fencrypt, fcompress, digest, msg, fVerifyIndex = True):
"""
Take crypto string, verify its signature and decrypt it, if
needed.
"""
if not fencrypt and not self.m_fAllowUnencrypted:
raise EncryptionExpected
if fencrypt and not is_encryption_supported():
raise EncryptionNotSupported
s = as_bytes(msg)
s = base64.decodestring(s)
if fencrypt:
s = self.__decrypt(s)
if fcompress:
s = zlib.decompress(s)
args, id = self.__verify_signature(digest, s, fVerifyIndex)
return (args, id)
def __encrypt(self, s):
s_padded = s + as_bytes('\x00') * (DES.block_size - (len(s) % DES.block_size))
key_padded = (self.m_key + as_bytes('0') * (DES.key_size - (len(self.m_key) % DES.key_size)))[:DES.key_size]
iv = '0' * DES.block_size
d = DES.new(key_padded, DES.MODE_CBC, iv)
r = d.encrypt(s_padded)
return r
def __decrypt(self, s):
try:
key_padded = (self.m_key + as_bytes('0') * (DES.key_size - (len(self.m_key) % DES.key_size)))[:DES.key_size]
iv = '0' * DES.block_size
d = DES.new(key_padded, DES.MODE_CBC, iv)
_s = d.decrypt(s).strip(as_bytes('\x00'))
return _s
except:
self.__wait_a_little()
raise DecryptionFailure
def __sign(self, args):
i = self.__get_next_index()
pack = (self.m_index_anchor_ex, i, self.m_rid, args)
#print_debug('***** 1' + repr(args)[:50])
s = pickle.dumps(pack, 2)
#print_debug('***** 2' + repr(args)[:50])
h = hmac.new(self.m_key, s, digestmod = _md5)
d = h.hexdigest()
#if 'coding:' in s:
# print_debug('%s, %s, %s\n\n==========\n\n%s' % (len(s), d, repr(args), repr(s)))
return (d, s)
def __get_next_index(self):
try:
self.m_lock.acquire()
self.m_index += 1
return self.m_index
finally:
self.m_lock.release()
def __verify_signature(self, digest, s, fVerifyIndex):
try:
h = hmac.new(self.m_key, s, digestmod = _md5)
d = h.hexdigest()
#if 'coding:' in s:
# print_debug('%s, %s, %s, %s' % (len(s), digest, d, repr(s)))
if d != digest:
self.__wait_a_little()
raise AuthenticationFailure
pack = pickle.loads(s)
(anchor, i, id, args) = pack
except AuthenticationFailure:
raise
except:
print_debug_exception()
self.__wait_a_little()
raise AuthenticationBadData
if fVerifyIndex:
self.__verify_index(anchor, i, id)
return args, id
def __verify_index(self, anchor, i, id):
"""
        Manage message ids to prevent replay of old messages.
"""
try:
try:
self.m_lock.acquire()
if anchor != self.m_index_anchor_in:
raise AuthenticationBadIndex(self.m_max_index, self.m_index_anchor_in)
if i > self.m_max_index + INDEX_TABLE_SIZE // 2:
raise AuthenticationBadIndex(self.m_max_index, self.m_index_anchor_in)
i_mod = i % INDEX_TABLE_SIZE
(iv, idl) = self.m_index_table.get(i_mod, (None, None))
#print >> sys.__stderr__, i, i_mod, iv, self.m_max_index
if (iv is None) or (i > iv):
idl = [id]
elif (iv == i) and (not id in idl):
idl.append(id)
else:
raise AuthenticationBadIndex(self.m_max_index, self.m_index_anchor_in)
self.m_index_table[i_mod] = (i, idl)
if i > self.m_max_index:
self.m_max_index = i
return self.m_index
finally:
self.m_lock.release()
except:
self.__wait_a_little()
raise
def __wait_a_little(self):
self.m_failure_lock.acquire()
time.sleep((1.0 + random.random()) / 2)
self.m_failure_lock.release()
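#
# Illustrative sketch, not part of the debugger: a do_crypto()/undo_crypto()
# round trip on a single CCrypto instance. With fencrypt = False the payload
# is signed (and possibly compressed) but not encrypted, which requires
# fAllowUnencrypted to be True. Note that key calculation is intentionally
# slow (around a second). The function below is hypothetical and never called.
#
def _example_ccrypto_roundtrip_sketch():
    c = CCrypto(as_unicode('secret_1'), True, as_unicode('1234567'))
    (fcompress, digest, msg) = c.do_crypto(['some', 'args'], fencrypt = False)
    (args, rid) = c.undo_crypto(False, fcompress, digest, msg, fVerifyIndex = False)
    #
    # args == ['some', 'args'] and rid identifies the signing party.
    #
    return (args, rid)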
#
# --------------------------------- Events List --------------------------
#
class CEvent(object):
"""
Base class for events.
"""
def __reduce__(self):
rv = (copy_reg.__newobj__, (type(self), ), vars(self), None, None)
return rv
def is_match(self, arg):
pass
class CEventNull(CEvent):
"""
Sent to release event listeners (Internal, speeds up shutdown).
"""
pass
class CEventEmbeddedSync(CEvent):
"""
Sent when an embedded interpreter becomes active if it needs to
determine if there are pending break requests. (Internal)
"""
pass
class CEventClearSourceCache(CEvent):
"""
Sent when the source cache is cleared.
"""
pass
class CEventSignalIntercepted(CEvent):
"""
This event is sent when a signal is intercepted inside tracing code.
    Such signals are held pending until the tracing code returns.
"""
def __init__(self, signum):
self.m_signum = signum
self.m_signame = calc_signame(signum)
class CEventSignalException(CEvent):
"""
This event is sent when the handler of a previously intercepted signal
raises an exception. Such exceptions are ignored because of technical
limitations.
"""
def __init__(self, signum, description):
self.m_signum = signum
self.m_signame = calc_signame(signum)
self.m_description = description
class CEventEncoding(CEvent):
"""
The encoding has been set.
"""
def __init__(self, encoding, fraw):
self.m_encoding = encoding
self.m_fraw = fraw
class CEventPsycoWarning(CEvent):
"""
The psyco module was detected. rpdb2 is incompatible with this module.
"""
pass
class CEventConflictingModules(CEvent):
"""
Conflicting modules were detected. rpdb2 is incompatible with these modules.
"""
def __init__(self, modules_list):
self.m_modules_list = modules_list
class CEventSyncReceivers(CEvent):
"""
A base class for events that need to be received by all listeners at
the same time. The synchronization mechanism is internal to rpdb2.
"""
def __init__(self, sync_n):
self.m_sync_n = sync_n
class CEventForkSwitch(CEventSyncReceivers):
"""
Debuggee is about to fork. Try to reconnect.
"""
pass
class CEventExecSwitch(CEventSyncReceivers):
"""
Debuggee is about to exec. Try to reconnect.
"""
pass
class CEventExit(CEvent):
"""
Debuggee is terminating.
"""
pass
class CEventState(CEvent):
"""
State of the debugger.
Value of m_state can be one of the STATE_* globals.
"""
def __init__(self, state):
self.m_state = as_unicode(state)
def is_match(self, arg):
return self.m_state == as_unicode(arg)
class CEventSynchronicity(CEvent):
"""
Mode of synchronicity.
Sent when mode changes.
"""
def __init__(self, fsynchronicity):
self.m_fsynchronicity = fsynchronicity
def is_match(self, arg):
return self.m_fsynchronicity == arg
class CEventTrap(CEvent):
"""
Mode of "trap unhandled exceptions".
Sent when the mode changes.
"""
def __init__(self, ftrap):
self.m_ftrap = ftrap
def is_match(self, arg):
return self.m_ftrap == arg
class CEventForkMode(CEvent):
"""
Mode of fork behavior has changed.
Sent when the mode changes.
"""
def __init__(self, ffork_into_child, ffork_auto):
self.m_ffork_into_child = ffork_into_child
self.m_ffork_auto = ffork_auto
class CEventUnhandledException(CEvent):
"""
Unhandled Exception
Sent when an unhandled exception is caught.
"""
class CEventNamespace(CEvent):
"""
Namespace has changed.
This tells the debugger it should query the namespace again.
"""
pass
class CEventNoThreads(CEvent):
"""
No threads to debug.
Debuggee notifies the debugger that it has no threads. This can
happen in embedded debugging and in a python interpreter session.
"""
pass
class CEventThreads(CEvent):
"""
State of threads.
"""
def __init__(self, _current_thread, thread_list):
self.m_current_thread = _current_thread
self.m_thread_list = thread_list
class CEventThreadBroken(CEvent):
"""
A thread has broken.
"""
def __init__(self, tid, name):
self.m_tid = tid
self.m_name = as_unicode(name)
class CEventStack(CEvent):
"""
Stack of current thread.
"""
def __init__(self, stack):
self.m_stack = stack
class CEventStackFrameChange(CEvent):
"""
Stack frame has changed.
This event is sent when the debugger goes up or down the stack.
"""
def __init__(self, frame_index):
self.m_frame_index = frame_index
class CEventStackDepth(CEvent):
"""
Stack depth has changed.
"""
def __init__(self, stack_depth, stack_depth_exception):
self.m_stack_depth = stack_depth
self.m_stack_depth_exception = stack_depth_exception
class CEventBreakpoint(CEvent):
"""
A breakpoint or breakpoints changed.
"""
DISABLE = as_unicode('disable')
ENABLE = as_unicode('enable')
REMOVE = as_unicode('remove')
SET = as_unicode('set')
def __init__(self, bp, action = SET, id_list = [], fAll = False):
self.m_bp = breakpoint_copy(bp)
self.m_action = action
self.m_id_list = id_list
self.m_fAll = fAll
class CEventSync(CEvent):
"""
    Internal (not sent to the debugger) event that triggers the
firing of other events that help the debugger synchronize with
the state of the debuggee.
"""
def __init__(self, fException, fSendUnhandled):
self.m_fException = fException
self.m_fSendUnhandled = fSendUnhandled
#
# --------------------------------- Event Manager --------------------------
#
class CEventDispatcherRecord:
"""
Internal structure that binds a callback to particular events.
"""
def __init__(self, callback, event_type_dict, fSingleUse):
self.m_callback = callback
self.m_event_type_dict = copy.copy(event_type_dict)
self.m_fSingleUse = fSingleUse
def is_match(self, event):
rtl = [t for t in self.m_event_type_dict.keys() if isinstance(event, t)]
if len(rtl) == 0:
return False
#
# Examine first match only.
#
rt = rtl[0]
rte = self.m_event_type_dict[rt].get(EVENT_EXCLUDE, [])
if len(rte) != 0:
for e in rte:
if event.is_match(e):
return False
return True
rte = self.m_event_type_dict[rt].get(EVENT_INCLUDE, [])
if len(rte) != 0:
for e in rte:
if event.is_match(e):
return True
return False
return True
class CEventDispatcher:
"""
Events dispatcher.
Dispatchers can be chained together.
"""
def __init__(self, chained_event_dispatcher = None):
self.m_chained_event_dispatcher = chained_event_dispatcher
self.m_chain_override_types = {}
self.m_registrants = {}
def shutdown(self):
for er in list(self.m_registrants.keys()):
self.__remove_dispatcher_record(er)
def register_callback(self, callback, event_type_dict, fSingleUse):
er = CEventDispatcherRecord(callback, event_type_dict, fSingleUse)
#
# If we have a chained dispatcher, register the callback on the
# chained dispatcher as well.
#
if self.m_chained_event_dispatcher is not None:
_er = self.__register_callback_on_chain(er, event_type_dict, fSingleUse)
self.m_registrants[er] = _er
return er
self.m_registrants[er] = True
return er
def remove_callback(self, callback):
erl = [er for er in list(self.m_registrants.keys()) if er.m_callback == callback]
for er in erl:
self.__remove_dispatcher_record(er)
def fire_events(self, event_list):
for event in event_list:
self.fire_event(event)
def fire_event(self, event):
for er in list(self.m_registrants.keys()):
self.__fire_er(event, er)
def __fire_er(self, event, er):
if not er.is_match(event):
return
try:
er.m_callback(event)
except:
pass
if not er.m_fSingleUse:
return
try:
del self.m_registrants[er]
except KeyError:
pass
def register_chain_override(self, event_type_dict):
"""
Chain override prevents registration on chained
dispatchers for specific event types.
"""
for t in list(event_type_dict.keys()):
self.m_chain_override_types[t] = True
def __register_callback_on_chain(self, er, event_type_dict, fSingleUse):
_event_type_dict = copy.copy(event_type_dict)
for t in self.m_chain_override_types:
if t in _event_type_dict:
del _event_type_dict[t]
if len(_event_type_dict) == 0:
return False
def callback(event, er = er):
self.__fire_er(event, er)
_er = self.m_chained_event_dispatcher.register_callback(callback, _event_type_dict, fSingleUse)
return _er
def __remove_dispatcher_record(self, er):
try:
if self.m_chained_event_dispatcher is not None:
_er = self.m_registrants[er]
if _er != False:
self.m_chained_event_dispatcher.__remove_dispatcher_record(_er)
del self.m_registrants[er]
except KeyError:
pass
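#
# Illustrative sketch, not part of the debugger: registering a callback on
# a dispatcher for state events, narrowed with an EVENT_INCLUDE filter so
# that only a particular state fires it. The function below is hypothetical
# and never called; 'broken' stands for one of the STATE_* globals.
#
def _example_event_dispatcher_sketch():
    dispatcher = CEventDispatcher()
    def _on_break(event):
        print_debug('Debuggee has broken.')
    event_type_dict = {CEventState: {EVENT_INCLUDE: ['broken']}}
    dispatcher.register_callback(_on_break, event_type_dict, fSingleUse = False)
    dispatcher.fire_event(CEventState('broken'))
    dispatcher.fire_event(CEventState('running')) # filtered out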
class CEventQueue:
"""
Add queue semantics above an event dispatcher.
Instead of firing event callbacks, new events are returned in a list
upon request.
"""
def __init__(self, event_dispatcher, max_event_list_length = MAX_EVENT_LIST_LENGTH):
self.m_event_dispatcher = event_dispatcher
self.m_event_lock = threading.Condition()
self.m_max_event_list_length = max_event_list_length
self.m_event_list = []
self.m_event_index = 0
self.m_n_waiters = []
def shutdown(self):
self.m_event_dispatcher.remove_callback(self.event_handler)
def register_event_types(self, event_type_dict):
self.m_event_dispatcher.register_callback(self.event_handler, event_type_dict, fSingleUse = False)
def event_handler(self, event):
try:
self.m_event_lock.acquire()
if isinstance(event, CEventSyncReceivers):
t0 = time.time()
while len(self.m_n_waiters) < event.m_sync_n and time.time() < t0 + HEARTBEAT_TIMEOUT:
time.sleep(0.1)
self.m_event_list.append(event)
if len(self.m_event_list) > self.m_max_event_list_length:
self.m_event_list.pop(0)
self.m_event_index += 1
lock_notify_all(self.m_event_lock)
finally:
self.m_event_lock.release()
def get_event_index(self):
return self.m_event_index
def wait_for_event(self, timeout, event_index):
"""
Return the new events which were fired.
"""
try:
self.m_n_waiters.append(0)
self.m_event_lock.acquire()
if event_index >= self.m_event_index:
safe_wait(self.m_event_lock, timeout)
if event_index >= self.m_event_index:
return (self.m_event_index, [])
sub_event_list = self.m_event_list[event_index - self.m_event_index:]
return (self.m_event_index, sub_event_list)
finally:
self.m_n_waiters.pop()
self.m_event_lock.release()
class CStateManager:
"""
Manage possible debugger states (broken, running, etc...)
The state manager can receive state changes via an input event
dispatcher or via the set_state() method
It sends state changes forward to the output event dispatcher.
The state can also be queried or waited for.
"""
def __init__(self, initial_state, event_dispatcher_output = None, event_dispatcher_input = None):
self.m_event_dispatcher_input = event_dispatcher_input
self.m_event_dispatcher_output = event_dispatcher_output
if self.m_event_dispatcher_input is not None:
event_type_dict = {CEventState: {}}
self.m_event_dispatcher_input.register_callback(self.event_handler, event_type_dict, fSingleUse = False)
if self.m_event_dispatcher_output is not None:
self.m_event_dispatcher_output.register_chain_override(event_type_dict)
self.m_state_lock = threading.Condition()
self.m_state_queue = []
self.m_state_index = 0
self.m_waiter_list = {}
self.set_state(initial_state)
def shutdown(self):
if self.m_event_dispatcher_input is not None:
self.m_event_dispatcher_input.remove_callback(self.event_handler)
def event_handler(self, event):
self.set_state(event.m_state)
def get_state(self):
return self.m_state_queue[-1]
def __add_state(self, state):
self.m_state_queue.append(state)
self.m_state_index += 1
self.__remove_states()
    def __remove_states(self, threshold = None):
        """
        Clean up old state changes from the state queue.
        """
        index = self.__calc_min_index()
        if (threshold is not None) and (index <= threshold):
return
_delta = 1 + self.m_state_index - index
self.m_state_queue = self.m_state_queue[-_delta:]
def __calc_min_index(self):
"""
Calc the minimum state index.
The calculated index is the oldest state of which all state
        waiters are aware. That is, no one cares about older states,
and these can be removed from the state queue.
"""
if len(self.m_waiter_list) == 0:
return self.m_state_index
index_list = list(self.m_waiter_list.keys())
min_index = min(index_list)
return min_index
def __add_waiter(self):
index = self.m_state_index
n = self.m_waiter_list.get(index, 0)
self.m_waiter_list[index] = n + 1
return index
def __remove_waiter(self, index):
n = self.m_waiter_list[index]
if n == 1:
del self.m_waiter_list[index]
self.__remove_states(index)
else:
self.m_waiter_list[index] = n - 1
def __get_states(self, index):
_delta = 1 + self.m_state_index - index
states = self.m_state_queue[-_delta:]
return states
def set_state(self, state = None, fLock = True):
try:
if fLock:
self.m_state_lock.acquire()
if state is None:
state = self.get_state()
self.__add_state(state)
lock_notify_all(self.m_state_lock)
finally:
if fLock:
self.m_state_lock.release()
if self.m_event_dispatcher_output is not None:
event = CEventState(state)
self.m_event_dispatcher_output.fire_event(event)
def wait_for_state(self, state_list):
"""
Wait for any of the states in the state list.
"""
try:
self.m_state_lock.acquire()
if self.get_state() in state_list:
return self.get_state()
while True:
index = self.__add_waiter()
alertable_wait(self.m_state_lock, PING_TIMEOUT)
states = self.__get_states(index)
self.__remove_waiter(index)
for state in states:
if state in state_list:
return state
finally:
self.m_state_lock.release()
def acquire(self):
self.m_state_lock.acquire()
def release(self):
self.m_state_lock.release()
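#
# Illustrative sketch (hypothetical usage): a thread that must pause
# until the debuggee breaks can simply block on the state manager.
#
# state = state_manager.wait_for_state([STATE_BROKEN])
# # Returns STATE_BROKEN once the debugger has actually broken.
#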
#
# -------------------------------------- Break Info manager ---------------------------------------
#
def myord(c):
try:
return ord(c)
except:
return c
def CalcValidLines(code):
l = code.co_firstlineno
vl = [l]
bl = [myord(c) for c in code.co_lnotab[2::2]]
sl = [myord(c) for c in code.co_lnotab[1::2]]
for (bi, si) in zip(bl, sl):
l += si
if bi == 0:
continue
if l != vl[-1]:
vl.append(l)
if len(sl) > 0:
l += sl[-1]
if l != vl[-1]:
vl.append(l)
return vl
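#
# Worked example (hedged: co_lnotab exists only through Python 3.9;
# later versions replace it with co_lines()). co_lnotab stores
# (byte-increment, line-increment) pairs, and CalcValidLines() returns
# the lines on which a bytecode chunk actually starts, i.e. the lines
# a breakpoint can bind to:
#
# def f():      # co_firstlineno
#     x = 1
#     y = 2
#
# CalcValidLines(f.__code__) typically yields the three line numbers
# of the def statement and the two assignments.
#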
class CScopeBreakInfo:
def __init__(self, fqn, valid_lines):
self.m_fqn = fqn
self.m_first_line = valid_lines[0]
self.m_last_line = valid_lines[-1]
self.m_valid_lines = valid_lines
def CalcScopeLine(self, lineno):
rvl = copy.copy(self.m_valid_lines)
rvl.reverse()
for l in rvl:
if lineno >= l:
break
return l
def __str__(self):
return "('" + self.m_fqn + "', " + str(self.m_valid_lines) + ')'
class CFileBreakInfo:
"""
Break info structure for a source file.
"""
def __init__(self, filename):
self.m_filename = filename
self.m_first_line = 0
self.m_last_line = 0
self.m_scope_break_info = []
def CalcBreakInfo(self):
(source, encoding) = get_source(self.m_filename)
_source = as_string(source + as_unicode('\n'), encoding)
code = compile(_source, self.m_filename, "exec")
self.m_scope_break_info = []
self.m_first_line = code.co_firstlineno
self.m_last_line = 0
fqn = []
t = [code]
while len(t) > 0:
c = t.pop(0)
if type(c) == tuple:
self.m_scope_break_info.append(CScopeBreakInfo(*c))
fqn.pop()
continue
fqn = fqn + [c.co_name]
valid_lines = CalcValidLines(c)
self.m_last_line = max(self.m_last_line, valid_lines[-1])
_fqn = as_unicode('.'.join(fqn), encoding)
si = (_fqn, valid_lines)
subcodeslist = self.__CalcSubCodesList(c)
t = subcodeslist + [si] + t
def __CalcSubCodesList(self, code):
tc = type(code)
t = [(c.co_firstlineno, c) for c in code.co_consts if type(c) == tc]
t.sort()
scl = [c[1] for c in t]
return scl
def FindScopeByLineno(self, lineno):
lineno = max(min(lineno, self.m_last_line), self.m_first_line)
smaller_element = None
exact_element = None
for sbi in self.m_scope_break_info:
if lineno > sbi.m_last_line:
if (smaller_element is None) or (sbi.m_last_line >= smaller_element.m_last_line):
smaller_element = sbi
continue
if (lineno >= sbi.m_first_line) and (lineno <= sbi.m_last_line):
exact_element = sbi
break
assert(exact_element is not None)
scope = exact_element
l = exact_element.CalcScopeLine(lineno)
if (smaller_element is not None) and (l <= smaller_element.m_last_line):
scope = smaller_element
l = smaller_element.CalcScopeLine(lineno)
return (scope, l)
def FindScopeByName(self, name, offset):
if name.startswith(MODULE_SCOPE):
alt_scope = MODULE_SCOPE2 + name[len(MODULE_SCOPE):]
elif name.startswith(MODULE_SCOPE2):
alt_scope = MODULE_SCOPE + name[len(MODULE_SCOPE2):]
else:
return self.FindScopeByName(MODULE_SCOPE2 + SCOPE_SEP + name, offset)
for sbi in self.m_scope_break_info:
if sbi.m_fqn in [name, alt_scope]:
l = sbi.CalcScopeLine(sbi.m_first_line + offset)
return (sbi, l)
print_debug('Invalid scope: %s' % repr(name))
raise InvalidScopeName
class CBreakInfoManager:
"""
Manage break info dictionary per filename.
"""
def __init__(self):
self.m_file_info_dic = {}
def addFile(self, filename):
mbi = CFileBreakInfo(filename)
mbi.CalcBreakInfo()
self.m_file_info_dic[filename] = mbi
def getFile(self, filename):
if not filename in self.m_file_info_dic:
self.addFile(filename)
return self.m_file_info_dic[filename]
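#
# Illustrative sketch (hypothetical filename): mapping a requested
# line to the innermost scope and nearest valid breakpoint line.
#
# manager = CBreakInfoManager()
# mbi = manager.getFile('/path/to/module.py')
# (scope_info, actual_lineno) = mbi.FindScopeByLineno(17)
# # actual_lineno is the closest valid line at or before line 17 in
# # the scope that contains it; scope_info.m_fqn names that scope.
#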
#
# -------------------------------- Break Point Manager -----------------------------
#
def breakpoint_copy(bp):
if bp is None:
return None
_bp = copy.copy(bp)
#filename = g_found_unicode_files.get(bp.m_filename, bp.m_filename)
_bp.m_filename = as_unicode(bp.m_filename, sys.getfilesystemencoding())
_bp.m_code = None
return _bp
class CBreakPoint(object):
def __init__(self, filename, scope_fqn, scope_first_line, lineno, fEnabled, expr, encoding, fTemporary = False):
"""
Breakpoint constructor.
scope_fqn - scope fully qualified name, e.g. module.class.method
"""
self.m_id = None
self.m_fEnabled = fEnabled
self.m_filename = filename
self.m_scope_fqn = scope_fqn
self.m_scope_name = scope_fqn.split(SCOPE_SEP)[-1]
self.m_scope_first_line = scope_first_line
self.m_scope_offset = lineno - scope_first_line
self.m_lineno = lineno
self.m_expr = expr
self.m_encoding = encoding
self.m_code = None
self.m_fTemporary = fTemporary
if (expr is not None) and (expr != ''):
_expr = as_bytes(ENCODING_SOURCE % encoding + expr, encoding)
print_debug('Breakpoint expression: %s' % repr(_expr))
self.m_code = compile(_expr, '<string>', 'eval')
def __reduce__(self):
rv = (copy_reg.__newobj__, (type(self), ), vars(self), None, None)
return rv
def calc_enclosing_scope_name(self):
if self.m_scope_offset != 0:
return None
if self.m_scope_fqn in [MODULE_SCOPE, MODULE_SCOPE2]:
return None
scope_name_list = self.m_scope_fqn.split(SCOPE_SEP)
enclosing_scope_name = scope_name_list[-2]
return enclosing_scope_name
def enable(self):
self.m_fEnabled = True
def disable(self):
self.m_fEnabled = False
def isEnabled(self):
return self.m_fEnabled
def __str__(self):
return "('" + self.m_filename + "', '" + self.m_scope_fqn + "', " + str(self.m_scope_first_line) + ', ' + str(self.m_scope_offset) + ', ' + str(self.m_lineno) + ')'
class CBreakPointsManagerProxy:
"""
A proxy for the breakpoint manager.
While the breakpoint manager resides on the debuggee (the server),
the proxy resides in the debugger (the client, i.e. the session manager).
"""
def __init__(self, session_manager):
self.m_session_manager = session_manager
self.m_break_points_by_file = {}
self.m_break_points_by_id = {}
self.m_lock = threading.Lock()
#
# The breakpoint proxy inserts itself between the two chained
# event dispatchers in the session manager.
#
event_type_dict = {CEventBreakpoint: {}}
self.m_session_manager.m_event_dispatcher_proxy.register_callback(self.update_bp, event_type_dict, fSingleUse = False)
self.m_session_manager.m_event_dispatcher.register_chain_override(event_type_dict)
def update_bp(self, event):
"""
Handle breakpoint updates that arrive via the event dispatcher.
"""
try:
self.m_lock.acquire()
if event.m_fAll:
id_list = list(self.m_break_points_by_id.keys())
else:
id_list = event.m_id_list
if event.m_action == CEventBreakpoint.REMOVE:
for id in id_list:
try:
bp = self.m_break_points_by_id.pop(id)
bpm = self.m_break_points_by_file[bp.m_filename]
del bpm[bp.m_lineno]
if len(bpm) == 0:
del self.m_break_points_by_file[bp.m_filename]
except KeyError:
pass
return
if event.m_action == CEventBreakpoint.DISABLE:
for id in id_list:
try:
bp = self.m_break_points_by_id[id]
bp.disable()
except KeyError:
pass
return
if event.m_action == CEventBreakpoint.ENABLE:
for id in id_list:
try:
bp = self.m_break_points_by_id[id]
bp.enable()
except KeyError:
pass
return
bpm = self.m_break_points_by_file.get(event.m_bp.m_filename, {})
bpm[event.m_bp.m_lineno] = event.m_bp
self.m_break_points_by_id[event.m_bp.m_id] = event.m_bp
finally:
self.m_lock.release()
self.m_session_manager.m_event_dispatcher.fire_event(event)
def sync(self):
try:
self.m_lock.acquire()
self.m_break_points_by_file = {}
self.m_break_points_by_id = {}
finally:
self.m_lock.release()
break_points_by_id = self.m_session_manager.getSession().getProxy().get_breakpoints()
try:
self.m_lock.acquire()
self.m_break_points_by_id.update(break_points_by_id)
for bp in list(self.m_break_points_by_id.values()):
bpm = self.m_break_points_by_file.get(bp.m_filename, {})
bpm[bp.m_lineno] = bp
finally:
self.m_lock.release()
def clear(self):
try:
self.m_lock.acquire()
self.m_break_points_by_file = {}
self.m_break_points_by_id = {}
finally:
self.m_lock.release()
def get_breakpoints(self):
return self.m_break_points_by_id
def get_breakpoint(self, filename, lineno):
bpm = self.m_break_points_by_file[filename]
bp = bpm[lineno]
return bp
class CBreakPointsManager:
def __init__(self):
self.m_break_info_manager = CBreakInfoManager()
self.m_active_break_points_by_file = {}
self.m_break_points_by_function = {}
self.m_break_points_by_file = {}
self.m_break_points_by_id = {}
self.m_lock = threading.Lock()
self.m_temp_bp = None
self.m_fhard_tbp = False
def get_active_break_points_by_file(self, filename):
"""
Get active breakpoints for file.
"""
_filename = winlower(filename)
return self.m_active_break_points_by_file.setdefault(_filename, {})
def __calc_active_break_points_by_file(self, filename):
bpmpt = self.m_active_break_points_by_file.setdefault(filename, {})
bpmpt.clear()
bpm = self.m_break_points_by_file.get(filename, {})
for bp in list(bpm.values()):
if bp.m_fEnabled:
bpmpt[bp.m_lineno] = bp
tbp = self.m_temp_bp
if (tbp is not None) and (tbp.m_filename == filename):
bpmpt[tbp.m_lineno] = tbp
def __remove_from_function_list(self, bp):
function_name = bp.m_scope_name
try:
bpf = self.m_break_points_by_function[function_name]
del bpf[bp]
if len(bpf) == 0:
del self.m_break_points_by_function[function_name]
except KeyError:
pass
#
# In some cases a breakpoint belongs to two scopes at the
# same time. For example a breakpoint on the declaration line
# of a function.
#
_function_name = bp.calc_enclosing_scope_name()
if _function_name is None:
return
try:
_bpf = self.m_break_points_by_function[_function_name]
del _bpf[bp]
if len(_bpf) == 0:
del self.m_break_points_by_function[_function_name]
except KeyError:
pass
def __add_to_function_list(self, bp):
function_name = bp.m_scope_name
bpf = self.m_break_points_by_function.setdefault(function_name, {})
bpf[bp] = True
#
# In some cases a breakpoint belongs to two scopes at the
# same time. For example a breakpoint on the declaration line
# of a function.
#
_function_name = bp.calc_enclosing_scope_name()
if _function_name is None:
return
_bpf = self.m_break_points_by_function.setdefault(_function_name, {})
_bpf[bp] = True
def get_breakpoint(self, filename, lineno):
"""
Get breakpoint by file and line number.
"""
bpm = self.m_break_points_by_file[filename]
bp = bpm[lineno]
return bp
def del_temp_breakpoint(self, fLock = True, breakpoint = None):
"""
Delete a temporary breakpoint.
A temporary breakpoint is used when the debugger is asked to
run-to a particular line.
Hard temporary breakpoints are deleted only when actually hit.
"""
if self.m_temp_bp is None:
return
try:
if fLock:
self.m_lock.acquire()
if self.m_temp_bp is None:
return
if self.m_fhard_tbp and not breakpoint is self.m_temp_bp:
return
bp = self.m_temp_bp
self.m_temp_bp = None
self.m_fhard_tbp = False
self.__remove_from_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
finally:
if fLock:
self.m_lock.release()
def set_temp_breakpoint(self, filename, scope, lineno, fhard = False):
"""
Set a temporary breakpoint.
A temporary breakpoint is used when the debugger is asked to
run-to a particular line.
Hard temporary breakpoints are deleted only when actually hit.
"""
_filename = winlower(filename)
mbi = self.m_break_info_manager.getFile(_filename)
if scope != '':
(s, l) = mbi.FindScopeByName(scope, lineno)
else:
(s, l) = mbi.FindScopeByLineno(lineno)
bp = CBreakPoint(_filename, s.m_fqn, s.m_first_line, l, fEnabled = True, expr = as_unicode(''), encoding = as_unicode('utf-8'), fTemporary = True)
try:
self.m_lock.acquire()
self.m_fhard_tbp = False
self.del_temp_breakpoint(fLock = False)
self.m_fhard_tbp = fhard
self.m_temp_bp = bp
self.__add_to_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
finally:
self.m_lock.release()
def set_breakpoint(self, filename, scope, lineno, fEnabled, expr, encoding):
"""
Set breakpoint.
scope - a string (possibly empty) with the dotted scope of the
breakpoint, e.g. 'my_module.my_class.foo'
expr - a string (possibly empty) with a python expression
that will be evaluated at the scope of the breakpoint.
The breakpoint will be hit if the expression evaluates
to True.
"""
_filename = winlower(filename)
mbi = self.m_break_info_manager.getFile(_filename)
if scope != '':
(s, l) = mbi.FindScopeByName(scope, lineno)
else:
(s, l) = mbi.FindScopeByLineno(lineno)
bp = CBreakPoint(_filename, s.m_fqn, s.m_first_line, l, fEnabled, expr, encoding)
try:
self.m_lock.acquire()
bpm = self.m_break_points_by_file.setdefault(_filename, {})
#
# If a breakpoint on the same line is found we use its ID.
# Since the debugger lists breakpoints by IDs, this has
# a similar effect to modifying the breakpoint.
#
try:
old_bp = bpm[l]
id = old_bp.m_id
self.__remove_from_function_list(old_bp)
except KeyError:
#
# Find the smallest available ID.
#
bpids = list(self.m_break_points_by_id.keys())
bpids.sort()
id = 0
while id < len(bpids):
if bpids[id] != id:
break
id += 1
bp.m_id = id
self.m_break_points_by_id[id] = bp
bpm[l] = bp
if fEnabled:
self.__add_to_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
return bp
finally:
self.m_lock.release()
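#
# Worked example of the ID allocation above: if breakpoints 0, 1 and 3
# already exist, bpids == [0, 1, 3]; the scan stops at index 2 because
# bpids[2] == 3 != 2, so the new breakpoint receives id 2.
#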
def disable_breakpoint(self, id_list, fAll):
"""
Disable breakpoint.
"""
try:
self.m_lock.acquire()
if fAll:
id_list = list(self.m_break_points_by_id.keys())
for id in id_list:
try:
bp = self.m_break_points_by_id[id]
except KeyError:
continue
bp.disable()
self.__remove_from_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
finally:
self.m_lock.release()
def enable_breakpoint(self, id_list, fAll):
"""
Enable breakpoint.
"""
try:
self.m_lock.acquire()
if fAll:
id_list = list(self.m_break_points_by_id.keys())
for id in id_list:
try:
bp = self.m_break_points_by_id[id]
except KeyError:
continue
bp.enable()
self.__add_to_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
finally:
self.m_lock.release()
def delete_breakpoint(self, id_list, fAll):
"""
Delete breakpoint.
"""
try:
self.m_lock.acquire()
if fAll:
id_list = list(self.m_break_points_by_id.keys())
for id in id_list:
try:
bp = self.m_break_points_by_id[id]
except KeyError:
continue
filename = bp.m_filename
lineno = bp.m_lineno
bpm = self.m_break_points_by_file[filename]
if bp == bpm[lineno]:
del bpm[lineno]
if len(bpm) == 0:
del self.m_break_points_by_file[filename]
self.__remove_from_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
del self.m_break_points_by_id[id]
finally:
self.m_lock.release()
def get_breakpoints(self):
return self.m_break_points_by_id
#
# ----------------------------------- Core Debugger ------------------------------------
#
class CCodeContext:
"""
Class represents info related to code objects.
"""
def __init__(self, frame, bp_manager):
self.m_code = frame.f_code
self.m_filename = calc_frame_path(frame)
self.m_basename = os.path.basename(self.m_filename)
self.m_file_breakpoints = bp_manager.get_active_break_points_by_file(self.m_filename)
self.m_fExceptionTrap = False
def is_untraced(self):
"""
Return True if this code object should not be traced.
"""
return self.m_basename in [THREADING_FILENAME, DEBUGGER_FILENAME]
def is_exception_trap_frame(self):
"""
Return True if this frame should be a trap for unhandled
exceptions.
"""
if self.m_basename == THREADING_FILENAME:
return True
if self.m_basename == DEBUGGER_FILENAME and self.m_code.co_name in ['__execv', '__execve', '__function_wrapper']:
return True
return False
class CDebuggerCoreThread:
"""
Class represents a debugged thread.
This is a core structure of the debugger. It includes most of the
optimization tricks and hacks, and a good amount of subtle bug
fixes; be careful not to mess it up...
"""
def __init__(self, name, core_debugger, frame, event):
self.m_thread_id = thread.get_ident()
self.m_thread_name = name
self.m_fBroken = False
self.m_fUnhandledException = False
self.m_frame = frame
self.m_event = event
self.m_ue_lineno = None
self.m_uef_lineno = None
self.m_code_context = core_debugger.get_code_context(frame)
self.m_locals_copy = {}
self.m_core = core_debugger
self.m_bp_manager = core_debugger.m_bp_manager
self.m_frame_lock = threading.Condition()
self.m_frame_external_references = 0
self.m_exc_info = None
self.m_depth = 0
self.set_depth(frame)
def set_depth(self, frame):
self.m_depth = 0
while frame is not None:
self.m_depth += 1
frame = frame.f_back
def profile_recursion(self, frame, event, arg):
if event == 'call':
if self.m_depth > g_recursionlimit:
print_debug('Exceeded recursion limit and caught in profile function.')
try:
#
# The allowed recursion limit was exceeded.
# To view the offending script frame, go two frames
# down the stack with the 'down' console command.
#
raise RuntimeError('maximum recursion depth exceeded')
except:
#
# Schedule the debugger to re-enable the profile hook.
#
self.set_tracers(fsignal_exception = True)
raise
elif event == 'return':
return self.profile(frame, event, arg)
def profile(self, frame, event, arg):
"""
Profiler method.
The Python profiling mechanism is used by the debugger
mainly to handle synchronization issues related to the
lifetime of the frame structure.
"""
#print_debug('profile: %s, %s, %s, %s, %s' % (repr(frame), event, frame.f_code.co_name, frame.f_code.co_filename, repr(arg)[:40]))
if event == 'return':
self.m_depth -= 1
if sys.excepthook != g_excepthook:
set_excepthook()
self.m_frame = frame.f_back
try:
self.m_code_context = self.m_core.m_code_contexts[self.m_frame.f_code]
except AttributeError:
if self.m_event != 'return' and self.m_core.m_ftrap:
#
# An exception is raised from the outer-most frame.
# This means an unhandled exception.
#
self.m_frame = frame
self.m_event = 'exception'
self.m_uef_lineno = self.m_ue_lineno
self.m_fUnhandledException = True
self.m_core._break(self, frame, event, arg)
self.m_uef_lineno = None
if frame in self.m_locals_copy:
self.update_locals()
self.m_frame = None
self.m_core.remove_thread(self.m_thread_id)
sys.setprofile(None)
sys.settrace(self.m_core.trace_dispatch_init)
if self.m_frame_external_references == 0:
return
#
# Wait until no one references the frame object
#
try:
self.m_frame_lock.acquire()
while self.m_frame_external_references != 0:
safe_wait(self.m_frame_lock, 1.0)
finally:
self.m_frame_lock.release()
def frame_acquire(self):
"""
Acquire a reference to the frame.
"""
try:
self.m_frame_lock.acquire()
self.m_frame_external_references += 1
f = self.m_frame
if f is None:
raise ThreadDone
return f
finally:
self.m_frame_lock.release()
def frame_release(self):
"""
Release a reference to the frame.
"""
try:
self.m_frame_lock.acquire()
self.m_frame_external_references -= 1
if self.m_frame_external_references == 0:
self.m_frame_lock.notify()
finally:
self.m_frame_lock.release()
def get_frame(self, base_frame, index, fException = False):
"""
Get frame at index depth down the stack.
Starting from base_frame return the index depth frame
down the stack. If fException is True use the exception
stack (traceback).
"""
if fException:
tb = get_traceback(base_frame, self)
if tb is None:
raise NoExceptionFound
while tb.tb_next is not None:
tb = tb.tb_next
f = tb.tb_frame
else:
f = base_frame
while f is not None:
if not g_fDebug and f.f_code.co_name == 'rpdb2_import_wrapper':
f = f.f_back
continue
if index <= 0:
break
f = f.f_back
index -= 1
if (index < 0) or (f is None):
raise InvalidFrame
if (self.m_uef_lineno is not None) and (f.f_back is None):
lineno = self.m_uef_lineno
else:
lineno = f.f_lineno
if fException:
tb = get_traceback(base_frame, self)
while tb is not None:
if tb.tb_frame == f:
lineno = tb.tb_lineno
break
tb = tb.tb_next
return (f, lineno)
def get_locals_copy(self, frame_index, fException, fReadOnly):
"""
Get globals and locals of frame.
A copy scheme is used for locals to work around a bug in
Python 2.3 and 2.4 that prevents modifying the local dictionary.
"""
try:
base_frame = self.frame_acquire()
(f, lineno) = self.get_frame(base_frame, frame_index, fException)
if fReadOnly:
gc = copy.copy(f.f_globals)
else:
gc = f.f_globals
try:
(lc, olc) = self.m_locals_copy[f]
except KeyError:
if f.f_code.co_name in [MODULE_SCOPE, MODULE_SCOPE2]:
lc = gc
olc = gc
else:
lc = copy.copy(f.f_locals)
olc = copy.copy(lc)
if not fReadOnly:
self.m_locals_copy[f] = (lc, olc)
self.set_local_trace(f)
return (gc, lc, olc)
finally:
f = None
base_frame = None
self.frame_release()
def update_locals_copy(self):
"""
Update copy of locals with changes in locals.
"""
lct = self.m_locals_copy.get(self.m_frame, None)
if lct is None:
return
(lc, base) = lct
cr = copy.copy(self.m_frame.f_locals)
for k in cr:
if not k in base:
lc[k] = cr[k]
continue
if not cr[k] is base[k]:
lc[k] = cr[k]
def update_locals(self):
"""
Update locals with changes from copy of locals.
"""
lct = self.m_locals_copy.pop(self.m_frame, None)
if lct is None:
return
self.m_frame.f_locals.update(lct[0])
def __eval_breakpoint(self, frame, bp):
"""
Return True if the breakpoint is hit.
"""
if not bp.m_fEnabled:
return False
if bp.m_expr == '':
return True
try:
if frame in self.m_locals_copy:
l = self.m_locals_copy[frame][0]
v = eval(bp.m_code, frame.f_globals, l)
else:
v = eval(bp.m_code, frame.f_globals, frame.f_locals)
return (v != False)
except:
return False
def set_local_trace(self, frame, fsignal_exception = False):
"""
Set trace callback of frame.
Specialized trace methods are selected here to save switching time
during actual tracing.
"""
if not self.m_core.m_ftrace:
frame.f_trace = self.trace_dispatch_stop
return
if fsignal_exception:
frame.f_trace = self.trace_dispatch_signal
return
code_context = self.m_core.get_code_context(frame)
if self.m_core.is_break(self, frame):
frame.f_trace = self.trace_dispatch_break
elif code_context.m_fExceptionTrap or (frame.f_back is None):
frame.f_trace = self.trace_dispatch_trap
elif frame.f_code.co_name in self.m_bp_manager.m_break_points_by_function:
frame.f_trace = self.trace_dispatch
elif frame in self.m_locals_copy:
frame.f_trace = self.trace_dispatch
elif frame == self.m_core.m_return_frame:
frame.f_trace = self.trace_dispatch
else:
del frame.f_trace
def set_tracers(self, fsignal_exception = False):
"""
Set trace callbacks for all frames in stack.
"""
try:
try:
f = self.frame_acquire()
while f is not None:
self.set_local_trace(f, fsignal_exception)
f = f.f_back
except ThreadDone:
f = None
finally:
f = None
self.frame_release()
def trace_dispatch_stop(self, frame, event, arg):
"""
Disable tracing for this thread.
"""
if frame in self.m_locals_copy:
self.update_locals()
sys.settrace(None)
sys.setprofile(None)
return None
def trace_dispatch_break(self, frame, event, arg):
"""
Trace method for breaking a thread.
"""
if event not in ['line', 'return', 'exception']:
return frame.f_trace
if event == 'exception':
self.set_exc_info(arg)
self.m_event = event
if frame in self.m_locals_copy:
self.update_locals_copy()
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
return frame.f_trace
def trace_dispatch_call(self, frame, event, arg):
"""
Initial trace method for thread.
"""
if not self.m_core.m_ftrace:
return self.trace_dispatch_stop(frame, event, arg)
self.m_depth += 1
if self.m_depth > g_recursionlimit:
sys.setprofile(self.profile_recursion)
self.m_frame = frame
try:
self.m_code_context = self.m_core.m_code_contexts[frame.f_code]
except KeyError:
self.m_code_context = self.m_core.get_code_context(frame)
if self.m_core.m_fBreak or (self.m_core.m_step_tid == self.m_thread_id):
self.m_event = event
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
return frame.f_trace
if not frame.f_code.co_name in self.m_bp_manager.m_break_points_by_function:
return None
bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None)
if bp is not None and self.__eval_breakpoint(frame, bp):
self.m_event = event
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
return frame.f_trace
return self.trace_dispatch
def trace_dispatch(self, frame, event, arg):
"""
General trace method for thread.
"""
if (event == 'line'):
if frame in self.m_locals_copy:
self.update_locals_copy()
bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None)
if bp is not None and self.__eval_breakpoint(frame, bp):
self.m_event = event
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
return frame.f_trace
if event == 'return':
if frame in self.m_locals_copy:
self.update_locals_copy()
if frame == self.m_core.m_return_frame:
self.m_event = event
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
return None
if event == 'exception':
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
if not is_py3k() and not frame.f_exc_traceback is arg[2]:
(frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback) = arg
return frame.f_trace
return frame.f_trace
def trace_dispatch_trap(self, frame, event, arg):
"""
Trace method used for frames in which unhandled exceptions
should be caught.
"""
if (event == 'line'):
self.m_event = event
if frame in self.m_locals_copy:
self.update_locals_copy()
bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None)
if bp is not None and self.__eval_breakpoint(frame, bp):
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
return frame.f_trace
if event == 'return':
last_event = self.m_event
self.m_event = event
if frame in self.m_locals_copy:
self.update_locals_copy()
if frame == self.m_core.m_return_frame:
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
if last_event == 'exception':
self.m_event = last_event
return None
if event == 'exception':
self.m_event = event
if self.m_code_context.m_fExceptionTrap and self.m_core.m_ftrap:
self.set_exc_info(arg)
self.m_fUnhandledException = True
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
return frame.f_trace
self.m_ue_lineno = frame.f_lineno
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
if is_py3k():
self.set_exc_info(arg)
elif not frame.f_exc_traceback is arg[2]:
(frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback) = arg
return frame.f_trace
return frame.f_trace
def trace_dispatch_signal(self, frame, event, arg):
#print_debug('*** trace_dispatch_signal %s, %s, %s' % (frame.f_lineno, event, repr(arg)))
self.set_exc_info(arg)
self.set_tracers()
self.set_depth(frame)
sys.setprofile(self.profile)
return self.trace_dispatch_trap(frame, event, arg)
def set_exc_info(self, arg):
"""
Set exception information.
"""
if arg == None:
return
if is_py3k():
self.m_exc_info = arg
return
(t, v, tb) = arg
while tb is not None:
f = tb.tb_frame
f.f_exc_type = t
f.f_exc_value = v
f.f_exc_traceback = tb
tb = tb.tb_next
def get_exc_info(self):
return self.m_exc_info
def reset_exc_info(self):
self.m_exc_info = None
def is_breakpoint(self):
"""
Calc whether the current line is hit by a breakpoint.
"""
bp = self.m_code_context.m_file_breakpoints.get(self.m_frame.f_lineno, None)
if bp is not None and self.__eval_breakpoint(self.m_frame, bp):
return True
return False
def get_breakpoint(self):
"""
Return current line breakpoint if any.
"""
return self.m_code_context.m_file_breakpoints.get(self.m_frame.f_lineno, None)
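#
# Illustrative sketch (a simplification, not rpdb2's actual dispatch):
# the trace methods above rely on CPython's trace protocol, in which
# the value returned by a trace callback becomes the local trace
# function of the frame, and returning None disables tracing for it.
#
# import sys
#
# def local_trace(frame, event, arg):
#     print(frame.f_lineno, event)
#     return local_trace        # keep tracing this frame
#
# def global_trace(frame, event, arg):
#     if event == 'call':
#         return local_trace    # trace line events in this frame
#     return None               # ignore other frames
#
# sys.settrace(global_trace)
#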
class CDebuggerCore:
"""
Base class for the debugger.
Handles basic debugger functionality.
"""
def __init__(self, fembedded = False):
self.m_ftrace = True
self.m_current_ctx = None
self.m_f_first_to_break = True
self.m_f_break_on_init = False
self.m_builtins_hack = None
self.m_timer_embedded_giveup = None
self.m_threads_lock = threading.Condition()
self.m_threads = {}
self.m_event_dispatcher = CEventDispatcher()
self.m_state_manager = CStateManager(STATE_RUNNING, self.m_event_dispatcher)
self.m_ffork_into_child = False
self.m_ffork_auto = False
self.m_fsynchronicity = True
self.m_ftrap = True
self.m_fUnhandledException = False
self.m_fBreak = False
self.m_lastest_event = None
self.m_step_tid = None
self.m_next_frame = None
self.m_return_frame = None
self.m_saved_step = (None, None, None)
self.m_saved_next = None
self.m_bp_manager = CBreakPointsManager()
self.m_code_contexts = {None: None}
self.m_fembedded = fembedded
self.m_embedded_event = threading.Event()
self.m_embedded_sync_t0 = 0
self.m_embedded_sync_t1 = 0
self.m_heartbeats = {0: time.time() + 3600}
def shutdown(self):
self.m_event_dispatcher.shutdown()
self.m_state_manager.shutdown()
def is_embedded(self):
return self.m_fembedded
def send_fork_switch(self, sync_n):
"""
Notify client that debuggee is forking and that it should
try to reconnect to the child.
"""
print_debug('Sending fork switch event')
event = CEventForkSwitch(sync_n)
self.m_event_dispatcher.fire_event(event)
def send_exec_switch(self, sync_n):
"""
Notify client that debuggee is doing an exec and that it should
try to reconnect (in case the exec failed).
"""
print_debug('Sending exec switch event')
event = CEventExecSwitch(sync_n)
self.m_event_dispatcher.fire_event(event)
def send_event_exit(self):
"""
Notify client that the debuggee is shutting down.
"""
event = CEventExit()
self.m_event_dispatcher.fire_event(event)
def send_events(self, event):
pass
def set_request_go_timer(self, timeout):
"""
Set timeout thread to release debugger from waiting for a client
to attach.
"""
self.cancel_request_go_timer()
if timeout is None:
return
_timeout = max(1.0, timeout)
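#
# The lambda below builds a tuple purely for its side effects: both
# calls run in the timer thread and the tuple itself is discarded.
#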
f = lambda: (
self.record_client_heartbeat(0, False, True),
self.request_go()
)
self.m_timer_embedded_giveup = threading.Timer(_timeout, f)
self.m_timer_embedded_giveup.start()
#
# sleep() releases control and allows the timer thread to actually start
# before this scope returns.
#
time.sleep(0.1)
def cancel_request_go_timer(self):
t = self.m_timer_embedded_giveup
if t is not None:
self.m_timer_embedded_giveup = None
t.cancel()
def setbreak(self, f):
"""
Set thread to break on next statement.
"""
if not self.m_ftrace:
return
tid = thread.get_ident()
if not tid in self.m_threads:
return self.settrace(f)
ctx = self.m_threads[tid]
f.f_trace = ctx.trace_dispatch_break
self.m_saved_next = self.m_next_frame
self.m_next_frame = f
def settrace(self, f = None, f_break_on_init = True, timeout = None, builtins_hack = None):
"""
Start tracing mechanism for thread.
"""
if not self.m_ftrace:
return
tid = thread.get_ident()
if tid in self.m_threads:
return
self.set_request_go_timer(timeout)
self.m_f_break_on_init = f_break_on_init
self.m_builtins_hack = builtins_hack
threading.settrace(self.trace_dispatch_init)
sys.settrace(self.trace_dispatch_init)
if f is not None:
f.f_trace = self.trace_dispatch_init
def stoptrace(self):
"""
Stop tracing mechanism.
"""
global g_fignore_atexit
g_fignore_atexit = True
threading.settrace(None)
sys.settrace(None)
sys.setprofile(None)
self.m_ftrace = False
self.set_all_tracers()
try:
self.request_go()
except DebuggerNotBroken:
pass
#self.m_threads = {}
def get_code_context(self, frame):
try:
return self.m_code_contexts[frame.f_code]
except KeyError:
if self.m_builtins_hack != None:
if calc_frame_path(frame) == self.m_builtins_hack:
self.m_builtins_hack = None
frame.f_globals['__builtins__'] = g_builtins_module
code_context = CCodeContext(frame, self.m_bp_manager)
return self.m_code_contexts.setdefault(frame.f_code, code_context)
def get_current_ctx(self):
if len(self.m_threads) == 0:
raise NoThreads
return self.m_current_ctx
def get_ctx(self, tid):
ctx = self.m_threads.get(tid, None)
if ctx == None:
raise ThreadNotFound
return ctx
def wait_for_first_thread(self):
"""
Wait until at least one debuggee thread is alive.
Python can have 0 threads in some circumstances, such as
embedded Python and the Python interpreter console.
"""
if self.m_current_ctx is not None:
return
try:
self.m_threads_lock.acquire()
while self.m_current_ctx is None:
safe_wait(self.m_threads_lock, 1.0)
finally:
self.m_threads_lock.release()
def notify_first_thread(self):
"""
Notify that first thread is available for tracing.
"""
try:
self.m_threads_lock.acquire()
self.m_threads_lock.notify()
finally:
self.m_threads_lock.release()
def set_exception_trap_frame(self, frame):
"""
Set trap for unhandled exceptions in relevant frame.
"""
while frame is not None:
code_context = self.get_code_context(frame)
if code_context.is_exception_trap_frame():
code_context.m_fExceptionTrap = True
return
frame = frame.f_back
def __set_signal_handler(self):
"""
Set rpdb2 to wrap all signal handlers.
"""
for key, value in list(vars(signal).items()):
if not key.startswith('SIG') or key in ['SIG_IGN', 'SIG_DFL', 'SIGRTMIN', 'SIGRTMAX']:
continue
handler = signal.getsignal(value)
if handler in [signal.SIG_IGN, signal.SIG_DFL]:
continue
try:
signal.signal(value, handler)
except:
print_debug('Failed to set signal handler for signal %s(%d)' % (key, value))
def clear_source_cache(self):
g_lines_cache.clear()
event = CEventClearSourceCache()
self.m_event_dispatcher.fire_event(event)
def trace_dispatch_init(self, frame, event, arg):
"""
Initial tracing method.
"""
if event not in ['call', 'line', 'return']:
return None
code_context = self.get_code_context(frame)
if event == 'call' and code_context.is_untraced():
return None
self.set_exception_trap_frame(frame)
try:
t = current_thread()
name = thread_get_name(t)
except:
name = ''
if name == 'MainThread':
self.__set_signal_handler()
ctx = CDebuggerCoreThread(name, self, frame, event)
ctx.set_tracers()
try:
self.m_threads_lock.acquire()
self.m_threads[ctx.m_thread_id] = ctx
nthreads = len(self.m_threads)
if nthreads == 1:
self.prepare_embedded_sync()
finally:
self.m_threads_lock.release()
if nthreads == 1:
self.clear_source_cache()
self.m_current_ctx = ctx
self.notify_first_thread()
if self.m_f_break_on_init:
self.m_f_break_on_init = False
self.request_break()
sys.settrace(ctx.trace_dispatch_call)
sys.setprofile(ctx.profile)
self.wait_embedded_sync(nthreads == 1)
if event == 'call':
return ctx.trace_dispatch_call(frame, event, arg)
elif hasattr(frame, 'f_trace') and (frame.f_trace is not None):
return frame.f_trace(frame, event, arg)
else:
return None
def prepare_embedded_sync(self):
if not self.m_fembedded:
return
t = time.time()
t0 = self.m_embedded_sync_t0
if t0 != 0:
self.fix_heartbeats(t - t0)
if self.get_clients_attached() == 0:
return
if t - t0 < EMBEDDED_SYNC_THRESHOLD:
return
self.m_embedded_sync_t1 = t
self.m_embedded_event.clear()
def wait_embedded_sync(self, ftrigger):
if not self.m_fembedded:
return
t = time.time()
t0 = self.m_embedded_sync_t0
t1 = self.m_embedded_sync_t1
if t - t0 < EMBEDDED_SYNC_THRESHOLD:
return
if t - t1 >= EMBEDDED_SYNC_TIMEOUT:
return
if ftrigger:
event = CEventEmbeddedSync()
self.m_event_dispatcher.fire_event(event)
safe_wait(self.m_embedded_event, EMBEDDED_SYNC_TIMEOUT - (t - t1))
if ftrigger:
self.m_embedded_sync_t1 = 0
def embedded_sync(self):
self.m_embedded_event.set()
def set_all_tracers(self):
"""
Set trace methods for all frames of all threads.
"""
for ctx in list(self.m_threads.values()):
ctx.set_tracers()
def remove_thread(self, thread_id):
try:
del self.m_threads[thread_id]
if self.m_current_ctx.m_thread_id == thread_id:
self.m_current_ctx = list(self.m_threads.values())[0]
except (KeyError, IndexError):
self.m_embedded_sync_t0 = time.time()
def set_break_flag(self):
self.m_fBreak = (self.m_state_manager.get_state() == STATE_BROKEN)
def is_break(self, ctx, frame, event = None):
if self.m_fBreak:
return True
if ctx.m_fUnhandledException:
return True
if self.m_step_tid == ctx.m_thread_id:
return True
if self.m_next_frame == frame:
return True
if (self.m_return_frame == frame) and (event == 'return'):
return True
return False
def record_client_heartbeat(self, id, finit, fdetach):
"""
Record that client id is still attached.
"""
if finit:
self.m_heartbeats.pop(0, None)
if fdetach:
self.m_heartbeats.pop(id, None)
return
if finit or id in self.m_heartbeats:
self.m_heartbeats[id] = time.time()
def fix_heartbeats(self, missing_pulse):
for k, v in list(self.m_heartbeats.items()):
self.m_heartbeats[k] = v + missing_pulse
def get_clients_attached(self):
n = 0
t = time.time()
for v in list(self.m_heartbeats.values()):
if t < v + HEARTBEAT_TIMEOUT:
n += 1
return n
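#
# Worked example: a client counts as attached only while its last
# recorded heartbeat is younger than HEARTBEAT_TIMEOUT. Entry 0,
# seeded one hour into the future in __init__, keeps an embedded
# debuggee waiting for a first real client; record_client_heartbeat()
# with finit set drops it once a client attaches.
#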
def is_waiting_for_attach(self):
if self.get_clients_attached() != 1:
return False
if list(self.m_heartbeats.keys()) != [0]:
return False
return True
def _break(self, ctx, frame, event, arg):
"""
Main break logic.
"""
global g_fos_exit
global g_module_main
if not self.is_break(ctx, frame, event) and not ctx.is_breakpoint():
ctx.set_tracers()
return
ctx.m_fBroken = True
f_full_notification = False
f_uhe_notification = False
step_tid = self.m_step_tid
try:
self.m_state_manager.acquire()
if self.m_state_manager.get_state() != STATE_BROKEN:
self.set_break_dont_lock()
if g_module_main == -1:
try:
g_module_main = sys.modules['__main__']
except:
g_module_main = None
if not is_py3k() and not frame.f_exc_traceback is None:
ctx.set_exc_info((frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback))
if is_py3k() and ctx.get_exc_info() == None and sys.exc_info()[2] != None:
ctx.set_exc_info(sys.exc_info())
try:
t = current_thread()
ctx.m_thread_name = thread_get_name(t)
except:
pass
if ctx.m_fUnhandledException and not self.m_fUnhandledException:
self.m_fUnhandledException = True
f_uhe_notification = True
if self.is_auto_fork_first_stage(ctx.m_thread_id):
self.m_saved_step = (self.m_step_tid, self.m_saved_next, self.m_return_frame)
self.m_saved_next = None
self.m_bp_manager.m_fhard_tbp = True
if self.m_f_first_to_break or (self.m_current_ctx == ctx):
self.m_current_ctx = ctx
self.m_lastest_event = event
self.m_step_tid = None
self.m_next_frame = None
self.m_return_frame = None
self.m_saved_next = None
self.m_bp_manager.del_temp_breakpoint(breakpoint = ctx.get_breakpoint())
self.m_f_first_to_break = False
f_full_notification = True
finally:
self.m_state_manager.release()
ffork_second_stage = self.handle_fork(ctx)
self.handle_exec(ctx)
if self.is_auto_fork_first_stage(ctx.m_thread_id):
self.request_go_quiet()
elif self.m_ffork_auto and ffork_second_stage:
(self.m_step_tid, self.m_next_frame, self.m_return_frame) = self.m_saved_step
self.m_saved_step = (None, None, None)
self.m_bp_manager.m_fhard_tbp = False
self.request_go_quiet()
elif self.get_clients_attached() == 0:
#print_debug('state: %s' % self.m_state_manager.get_state())
self.request_go_quiet()
elif step_tid == ctx.m_thread_id and frame.f_code.co_name == 'rpdb2_import_wrapper':
self.request_step_quiet()
else:
if f_full_notification:
self.send_events(None)
else:
self.notify_thread_broken(ctx.m_thread_id, ctx.m_thread_name)
self.notify_namespace()
if f_uhe_notification:
self.send_unhandled_exception_event()
state = self.m_state_manager.wait_for_state([STATE_RUNNING])
self.prepare_fork_step(ctx.m_thread_id)
self.prepare_exec_step(ctx.m_thread_id)
ctx.m_fUnhandledException = False
ctx.m_fBroken = False
ctx.set_tracers()
ctx.reset_exc_info()
if g_fos_exit:
g_fos_exit = False
self.send_event_exit()
time.sleep(1.0)
self.stoptrace()
def is_auto_fork_first_stage(self, tid):
if not self.m_ffork_auto:
return False
return tid == g_forktid and g_forkpid == None
def prepare_fork_step(self, tid):
global g_forkpid
global g_ignore_broken_pipe
if tid != g_forktid:
return
self.m_step_tid = tid
g_forkpid = os.getpid()
if not self.m_ffork_into_child:
return
n = self.get_clients_attached()
self.send_fork_switch(n)
time.sleep(0.5)
g_server.shutdown()
CThread.joinAll()
g_ignore_broken_pipe = time.time()
def handle_fork(self, ctx):
global g_forktid
global g_forkpid
tid = ctx.m_thread_id
if g_forkpid == None or tid != g_forktid:
return False
forkpid = g_forkpid
g_forkpid = None
g_forktid = None
if os.getpid() == forkpid:
#
# Parent side of fork().
#
if not self.m_ffork_into_child:
#CThread.clearJoin()
#g_server.jumpstart()
return True
self.stoptrace()
return False
#
# Child side of fork().
#
if not self.m_ffork_into_child:
self.stoptrace()
return False
self.m_threads = {tid: ctx}
CThread.clearJoin()
g_server.jumpstart()
return True
def prepare_exec_step(self, tid):
global g_execpid
if tid != g_exectid:
return
self.m_step_tid = tid
g_execpid = os.getpid()
n = self.get_clients_attached()
self.send_exec_switch(n)
time.sleep(0.5)
g_server.shutdown()
CThread.joinAll()
def handle_exec(self, ctx):
global g_exectid
global g_execpid
tid = ctx.m_thread_id
if g_execpid == None or tid != g_exectid:
return False
g_execpid = None
g_exectid = None
#
# If we are here it means that the exec failed.
# Jumpstart the debugger to allow debugging to continue.
#
CThread.clearJoin()
g_server.jumpstart()
return True
def notify_thread_broken(self, tid, name):
"""
Notify that thread (tid) has broken.
This notification is sent for each thread that breaks after
the first one.
"""
_event = CEventThreadBroken(tid, name)
self.m_event_dispatcher.fire_event(_event)
def notify_namespace(self):
"""
Notify that a namespace update query should be done.
"""
_event = CEventNamespace()
self.m_event_dispatcher.fire_event(_event)
def get_state(self):
return self.m_state_manager.get_state()
def verify_broken(self):
if self.m_state_manager.get_state() != STATE_BROKEN:
raise DebuggerNotBroken
def get_current_filename(self, frame_index, fException):
"""
Return path of sources corresponding to the frame at depth
'frame_index' down the stack of the current thread.
"""
ctx = self.get_current_ctx()
try:
f = None
base_frame = ctx.frame_acquire()
(f, frame_lineno) = ctx.get_frame(base_frame, frame_index, fException)
frame_filename = calc_frame_path(f)
return frame_filename
finally:
f = None
base_frame = None
ctx.frame_release()
def get_threads(self):
return self.m_threads
def set_break_dont_lock(self):
self.m_f_first_to_break = True
self.m_state_manager.set_state(STATE_BROKEN, fLock = False)
self.set_break_flag()
self.set_all_tracers()
def request_break(self):
"""
Ask debugger to break (pause debuggee).
"""
if len(self.m_threads) == 0:
self.wait_for_first_thread()
try:
self.m_state_manager.acquire()
if self.m_state_manager.get_state() == STATE_BROKEN:
return
self.set_break_dont_lock()
finally:
self.m_state_manager.release()
self.send_events(None)
def request_go_quiet(self, fLock = True):
try:
self.request_go(fLock)
except DebuggerNotBroken:
pass
def request_go(self, fLock = True):
"""
Let debugger run.
"""
try:
if fLock:
self.m_state_manager.acquire()
self.verify_broken()
self.m_fUnhandledException = False
self.m_state_manager.set_state(STATE_RUNNING, fLock = False)
if self.m_fembedded:
time.sleep(0.33)
self.set_break_flag()
finally:
if fLock:
self.m_state_manager.release()
def request_go_breakpoint(self, filename, scope, lineno, frame_index, fException):
"""
Let debugger run until the temporary breakpoint defined by the arguments is hit.
"""
assert(is_unicode(filename))
assert(is_unicode(scope))
try:
self.m_state_manager.acquire()
self.verify_broken()
if filename in [None, '']:
_filename = self.get_current_filename(frame_index, fException)
elif not is_provider_filesystem(filename):
_filename = as_string(filename, sys.getfilesystemencoding())
else:
_filename = FindFile(filename, fModules = True)
self.m_bp_manager.set_temp_breakpoint(_filename, scope, lineno)
self.set_all_tracers()
self.request_go(fLock = False)
finally:
self.m_state_manager.release()
def request_step_quiet(self, fLock = True):
try:
self.request_step(fLock)
except DebuggerNotBroken:
pass
def request_step(self, fLock = True):
"""
Let debugger run until next statement is reached or a breakpoint
is hit in another thread.
"""
try:
if fLock:
self.m_state_manager.acquire()
self.verify_broken()
try:
ctx = self.get_current_ctx()
except NoThreads:
return
self.m_step_tid = ctx.m_thread_id
self.m_next_frame = None
self.m_return_frame = None
self.request_go(fLock = False)
finally:
if fLock:
self.m_state_manager.release()
def request_next(self):
"""
Let debugger run until next statement in the same frame
is reached or a breakpoint is hit in another thread.
"""
try:
self.m_state_manager.acquire()
self.verify_broken()
try:
ctx = self.get_current_ctx()
except NoThreads:
return
if self.m_lastest_event in ['return', 'exception']:
return self.request_step(fLock = False)
self.m_next_frame = ctx.m_frame
self.m_return_frame = None
self.request_go(fLock = False)
finally:
self.m_state_manager.release()
def request_return(self):
"""
Let debugger run until the end of the current frame is reached
or a breakpoint is hit in another thread.
"""
try:
self.m_state_manager.acquire()
self.verify_broken()
try:
ctx = self.get_current_ctx()
except NoThreads:
return
if self.m_lastest_event == 'return':
return self.request_step(fLock = False)
self.m_next_frame = None
self.m_return_frame = ctx.m_frame
self.request_go(fLock = False)
finally:
self.m_state_manager.release()
def request_jump(self, lineno):
"""
Jump to line number 'lineno'.
"""
try:
self.m_state_manager.acquire()
self.verify_broken()
try:
ctx = self.get_current_ctx()
except NoThreads:
return
frame = ctx.m_frame
code = frame.f_code
valid_lines = CalcValidLines(code)
sbi = CScopeBreakInfo(as_unicode(''), valid_lines)
l = sbi.CalcScopeLine(lineno)
frame.f_lineno = l
finally:
frame = None
self.m_state_manager.release()
self.send_events(None)
def set_thread(self, tid):
"""
Switch focus to specified thread.
"""
try:
self.m_state_manager.acquire()
self.verify_broken()
try:
if (tid >= 0) and (tid < 100):
_tid = list(self.m_threads.keys())[tid]
else:
_tid = tid
ctx = self.m_threads[_tid]
except (IndexError, KeyError):
raise ThreadNotFound
self.m_current_ctx = ctx
self.m_lastest_event = ctx.m_event
finally:
self.m_state_manager.release()
self.send_events(None)
class CDebuggerEngine(CDebuggerCore):
"""
Main class for the debugger.
Adds functionality on top of CDebuggerCore.
"""
def __init__(self, fembedded = False):
CDebuggerCore.__init__(self, fembedded)
event_type_dict = {
CEventState: {},
CEventStackDepth: {},
CEventBreakpoint: {},
CEventThreads: {},
CEventNoThreads: {},
CEventThreadBroken: {},
CEventNamespace: {},
CEventUnhandledException: {},
CEventStack: {},
CEventNull: {},
CEventExit: {},
CEventForkSwitch: {},
CEventExecSwitch: {},
CEventSynchronicity: {},
CEventTrap: {},
CEventForkMode: {},
CEventPsycoWarning: {},
CEventConflictingModules: {},
CEventSignalIntercepted: {},
CEventSignalException: {},
CEventClearSourceCache: {},
CEventEmbeddedSync: {}
}
self.m_event_queue = CEventQueue(self.m_event_dispatcher)
self.m_event_queue.register_event_types(event_type_dict)
event_type_dict = {CEventSync: {}}
self.m_event_dispatcher.register_callback(self.send_events, event_type_dict, fSingleUse = False)
def shutdown(self):
self.m_event_queue.shutdown()
CDebuggerCore.shutdown(self)
def sync_with_events(self, fException, fSendUnhandled):
"""
Send debugger state to client.
"""
if len(self.m_threads) == 0:
self.wait_for_first_thread()
index = self.m_event_queue.get_event_index()
event = CEventSync(fException, fSendUnhandled)
self.m_event_dispatcher.fire_event(event)
return index
def trap_conflicting_modules(self):
modules_list = []
for m in CONFLICTING_MODULES:
if m in g_found_conflicting_modules:
continue
if not m in sys.modules:
continue
if m == 'psyco':
#
# Old event kept for compatibility.
#
event = CEventPsycoWarning()
self.m_event_dispatcher.fire_event(event)
g_found_conflicting_modules.append(m)
modules_list.append(as_unicode(m))
if modules_list == []:
return False
event = CEventConflictingModules(modules_list)
self.m_event_dispatcher.fire_event(event)
return True
def wait_for_event(self, timeout, event_index):
"""
Wait for new events and return them as a list of events.
"""
self.cancel_request_go_timer()
self.trap_conflicting_modules()
(new_event_index, sel) = self.m_event_queue.wait_for_event(timeout, event_index)
if self.trap_conflicting_modules():
(new_event_index, sel) = self.m_event_queue.wait_for_event(timeout, event_index)
return (new_event_index, sel)
def set_breakpoint(self, filename, scope, lineno, fEnabled, expr, frame_index, fException, encoding):
print_debug('Setting breakpoint to: %s, %s, %d' % (repr(filename), scope, lineno))
assert(is_unicode(filename))
assert(is_unicode(scope))
assert(is_unicode(expr))
fLock = False
try:
if filename in [None, '']:
self.m_state_manager.acquire()
fLock = True
self.verify_broken()
_filename = self.get_current_filename(frame_index, fException)
elif not is_provider_filesystem(filename):
_filename = as_string(filename, sys.getfilesystemencoding())
else:
_filename = FindFile(filename, fModules = True)
if expr != '':
try:
encoding = self.__calc_encoding(encoding, filename = _filename)
_expr = as_bytes(ENCODING_SOURCE % encoding + expr, encoding)
compile(_expr, '<string>', 'eval')
except:
raise SyntaxError
encoding = as_unicode(encoding)
bp = self.m_bp_manager.set_breakpoint(_filename, scope, lineno, fEnabled, expr, encoding)
self.set_all_tracers()
event = CEventBreakpoint(bp)
#print_debug(repr(vars(bp)))
self.m_event_dispatcher.fire_event(event)
finally:
if fLock:
self.m_state_manager.release()
def disable_breakpoint(self, id_list, fAll):
self.m_bp_manager.disable_breakpoint(id_list, fAll)
self.set_all_tracers()
event = CEventBreakpoint(None, CEventBreakpoint.DISABLE, id_list, fAll)
self.m_event_dispatcher.fire_event(event)
def enable_breakpoint(self, id_list, fAll):
self.m_bp_manager.enable_breakpoint(id_list, fAll)
self.set_all_tracers()
event = CEventBreakpoint(None, CEventBreakpoint.ENABLE, id_list, fAll)
self.m_event_dispatcher.fire_event(event)
def delete_breakpoint(self, id_list, fAll):
self.m_bp_manager.delete_breakpoint(id_list, fAll)
self.set_all_tracers()
event = CEventBreakpoint(None, CEventBreakpoint.REMOVE, id_list, fAll)
self.m_event_dispatcher.fire_event(event)
def get_breakpoints(self):
"""
Return the id -> breakpoint dictionary.
"""
bpl = self.m_bp_manager.get_breakpoints()
_items = [(id, breakpoint_copy(bp)) for (id, bp) in bpl.items()]
for (id, bp) in _items:
bp.m_code = None
_bpl = dict(_items)
return _bpl
def send_events(self, event):
"""
Send series of events that define the debugger state.
"""
if isinstance(event, CEventSync):
fException = event.m_fException
fSendUnhandled = event.m_fSendUnhandled
else:
fException = False
fSendUnhandled = False
try:
if isinstance(event, CEventSync) and not fException:
self.m_state_manager.set_state()
self.send_stack_depth()
self.send_threads_event(fException)
self.send_stack_event(fException)
self.send_namespace_event()
if fSendUnhandled and self.m_fUnhandledException:
self.send_unhandled_exception_event()
except NoThreads:
self.send_no_threads_event()
except:
print_debug_exception()
raise
def send_unhandled_exception_event(self):
event = CEventUnhandledException()
self.m_event_dispatcher.fire_event(event)
def send_stack_depth(self):
"""
Send event with stack depth and exception stack depth.
"""
f = None
tb = None
ctx = self.get_current_ctx()
try:
try:
f = ctx.frame_acquire()
except ThreadDone:
return
s = my_extract_stack(f)
s = [1 for (a, b, c, d) in s if g_fDebug or c != 'rpdb2_import_wrapper']
stack_depth = len(s)
tb = get_traceback(f, ctx)
if tb == None:
stack_depth_exception = None
else:
s = my_extract_stack(tb.tb_frame.f_back)
s += my_extract_tb(tb)
s = [1 for (a, b, c, d) in s if g_fDebug or c != 'rpdb2_import_wrapper']
stack_depth_exception = len(s)
event = CEventStackDepth(stack_depth, stack_depth_exception)
self.m_event_dispatcher.fire_event(event)
finally:
f = None
tb = None
ctx.frame_release()
def send_threads_event(self, fException):
"""
Send event with current thread list.
In case of exception, send only the current thread.
"""
tl = self.get_thread_list()
if fException:
ctid = tl[0]
itl = tl[1]
_itl = [a for a in itl if a[DICT_KEY_TID] == ctid]
_tl = (ctid, _itl)
else:
_tl = tl
event = CEventThreads(*_tl)
self.m_event_dispatcher.fire_event(event)
def send_stack_event(self, fException):
sl = self.get_stack([], False, fException)
if len(sl) == 0:
return
event = CEventStack(sl[0])
self.m_event_dispatcher.fire_event(event)
def send_namespace_event(self):
"""
Send event notifying that the namespace should be queried again.
"""
event = CEventNamespace()
self.m_event_dispatcher.fire_event(event)
def send_no_threads_event(self):
_event = CEventNoThreads()
self.m_event_dispatcher.fire_event(_event)
def send_event_null(self):
"""
Make the event waiter return.
"""
event = CEventNull()
self.m_event_dispatcher.fire_event(event)
def __get_stack(self, ctx, ctid, fException):
tid = ctx.m_thread_id
f = None
_f = None
tb = None
_tb = None
try:
try:
f = ctx.frame_acquire()
except ThreadDone:
return None
if fException:
tb = get_traceback(f, ctx)
if tb == None:
raise NoExceptionFound
_tb = tb
while _tb.tb_next is not None:
_tb = _tb.tb_next
_f = _tb.tb_frame
s = my_extract_stack(tb.tb_frame.f_back)
s += my_extract_tb(tb)
else:
_f = f
s = my_extract_stack(f)
code_list = []
while _f is not None:
rc = repr(_f.f_code).split(',')[0].split()[-1]
rc = as_unicode(rc)
code_list.insert(0, rc)
_f = _f.f_back
finally:
f = None
_f = None
tb = None
_tb = None
ctx.frame_release()
#print code_list
__s = [(a, b, c, d) for (a, b, c, d) in s if g_fDebug or c != 'rpdb2_import_wrapper']
if (ctx.m_uef_lineno is not None) and (len(__s) > 0):
(a, b, c, d) = __s[0]
__s = [(a, ctx.m_uef_lineno, c, d)] + __s[1:]
r = {}
r[DICT_KEY_STACK] = __s
r[DICT_KEY_CODE_LIST] = code_list
r[DICT_KEY_TID] = tid
r[DICT_KEY_BROKEN] = ctx.m_fBroken
r[DICT_KEY_EVENT] = as_unicode([ctx.m_event, 'exception'][fException])
if tid == ctid:
r[DICT_KEY_CURRENT_TID] = True
return r
def get_stack(self, tid_list, fAll, fException):
if fException and (fAll or (len(tid_list) != 0)):
raise BadArgument
ctx = self.get_current_ctx()
ctid = ctx.m_thread_id
if fAll:
ctx_list = list(self.get_threads().values())
elif fException or (len(tid_list) == 0):
ctx_list = [ctx]
else:
ctx_list = [self.get_threads().get(t, None) for t in tid_list]
_sl = [self.__get_stack(ctx, ctid, fException) for ctx in ctx_list if ctx is not None]
sl = [s for s in _sl if s is not None]
return sl
def get_source_file(self, filename, lineno, nlines, frame_index, fException):
assert(is_unicode(filename))
if lineno < 1:
lineno = 1
nlines = -1
_lineno = lineno
r = {}
frame_filename = None
try:
ctx = self.get_current_ctx()
try:
f = None
base_frame = None
base_frame = ctx.frame_acquire()
(f, frame_lineno) = ctx.get_frame(base_frame, frame_index, fException)
frame_filename = calc_frame_path(f)
finally:
f = None
base_frame = None
ctx.frame_release()
frame_event = [[ctx.m_event, 'call'][frame_index > 0], 'exception'][fException]
except NoThreads:
if filename in [None, '']:
raise
if filename in [None, '']:
__filename = frame_filename
r[DICT_KEY_TID] = ctx.m_thread_id
elif not is_provider_filesystem(filename):
__filename = as_string(filename, sys.getfilesystemencoding())
else:
__filename = FindFile(filename, fModules = True)
if not IsPythonSourceFile(__filename):
raise NotPythonSource
_filename = winlower(__filename)
lines = []
breakpoints = {}
fhide_pwd_mode = False
while nlines != 0:
try:
g_traceback_lock.acquire()
line = get_source_line(_filename, _lineno)
finally:
g_traceback_lock.release()
if line == '':
break
#
# Remove any trace of session password from data structures that
# go over the network.
#
if fhide_pwd_mode:
if not ')' in line:
line = as_unicode('...\n')
else:
line = '...""")' + line.split(')', 1)[1]
fhide_pwd_mode = False
elif 'start_embedded_debugger(' in line:
ls = line.split('start_embedded_debugger(', 1)
line = ls[0] + 'start_embedded_debugger("""...Removed-password-from-output...'
if ')' in ls[1]:
line += '""")' + ls[1].split(')', 1)[1]
else:
line += '\n'
fhide_pwd_mode = True
lines.append(line)
try:
bp = self.m_bp_manager.get_breakpoint(_filename, _lineno)
breakpoints[_lineno] = as_unicode([STATE_DISABLED, STATE_ENABLED][bp.isEnabled()])
except KeyError:
pass
_lineno += 1
nlines -= 1
if frame_filename == _filename:
r[DICT_KEY_FRAME_LINENO] = frame_lineno
r[DICT_KEY_EVENT] = as_unicode(frame_event)
r[DICT_KEY_BROKEN] = ctx.m_fBroken
r[DICT_KEY_LINES] = lines
r[DICT_KEY_FILENAME] = as_unicode(_filename, sys.getfilesystemencoding())
r[DICT_KEY_BREAKPOINTS] = breakpoints
r[DICT_KEY_FIRST_LINENO] = lineno
return r
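#
# Worked example of the password filter above (hypothetical input):
#
# source line: v = start_embedded_debugger('s3cret', timeout = 10)
# sent line:   v = start_embedded_debugger("""...Removed-password-from-output...""")
#
# If the call spans several lines, fhide_pwd_mode stays True and the
# following lines are reduced to '...' until the closing parenthesis.
#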
def __get_source(self, ctx, nlines, frame_index, fException):
tid = ctx.m_thread_id
_frame_index = [0, frame_index][tid == self.m_current_ctx.m_thread_id]
try:
try:
f = None
base_frame = None
base_frame = ctx.frame_acquire()
(f, frame_lineno) = ctx.get_frame(base_frame, _frame_index, fException)
frame_filename = calc_frame_path(f)
except (ThreadDone, InvalidFrame):
return None
finally:
f = None
base_frame = None
ctx.frame_release()
frame_event = [[ctx.m_event, 'call'][frame_index > 0], 'exception'][fException]
first_line = max(1, frame_lineno - nlines // 2)
_lineno = first_line
lines = []
breakpoints = {}
fhide_pwd_mode = False
while nlines != 0:
try:
g_traceback_lock.acquire()
line = get_source_line(frame_filename, _lineno)
finally:
g_traceback_lock.release()
if line == '':
break
#
# Remove any trace of session password from data structures that
# go over the network.
#
if fhide_pwd_mode:
if not ')' in line:
line = as_unicode('...\n')
else:
line = '...""")' + line.split(')', 1)[1]
fhide_pwd_mode = False
elif 'start_embedded_debugger(' in line:
ls = line.split('start_embedded_debugger(', 1)
line = ls[0] + 'start_embedded_debugger("""...Removed-password-from-output...'
if ')' in ls[1]:
line += '""")' + ls[1].split(')', 1)[1]
else:
line += '\n'
fhide_pwd_mode = True
lines.append(line)
try:
bp = self.m_bp_manager.get_breakpoint(frame_filename, _lineno)
breakpoints[_lineno] = as_unicode([STATE_DISABLED, STATE_ENABLED][bp.isEnabled()])
except KeyError:
pass
_lineno += 1
nlines -= 1
r = {}
r[DICT_KEY_FRAME_LINENO] = frame_lineno
r[DICT_KEY_EVENT] = as_unicode(frame_event)
r[DICT_KEY_BROKEN] = ctx.m_fBroken
r[DICT_KEY_TID] = tid
r[DICT_KEY_LINES] = lines
r[DICT_KEY_FILENAME] = as_unicode(frame_filename, sys.getfilesystemencoding())
r[DICT_KEY_BREAKPOINTS] = breakpoints
r[DICT_KEY_FIRST_LINENO] = first_line
return r
def get_source_lines(self, nlines, fAll, frame_index, fException):
if fException and fAll:
raise BadArgument
if fAll:
ctx_list = list(self.get_threads().values())
else:
ctx = self.get_current_ctx()
ctx_list = [ctx]
_sl = [self.__get_source(ctx, nlines, frame_index, fException) for ctx in ctx_list]
sl = [s for s in _sl if s is not None]
return sl
def __get_locals_globals(self, frame_index, fException, fReadOnly = False):
ctx = self.get_current_ctx()
(_globals, _locals, _original_locals_copy) = ctx.get_locals_copy(frame_index, fException, fReadOnly)
return (_globals, _locals, _original_locals_copy)
def __calc_number_of_subnodes(self, r):
for t in [bytearray, bytes, str, str8, unicode, int, long, float, bool, type(None)]:
if t is type(r):
return 0
try:
try:
if isinstance(r, frozenset) or isinstance(r, set):
return len(r)
except NameError:
pass
if isinstance(r, sets.BaseSet):
return len(r)
if isinstance(r, dict):
return len(r)
if isinstance(r, list):
return len(r)
if isinstance(r, tuple):
return len(r)
return len(dir(r))
except AttributeError:
return 0
return 0
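#
# Note: each subnode produced by __calc_subnodes() below is a dict with
# the keys DICT_KEY_EXPR, DICT_KEY_NAME, DICT_KEY_REPR,
# DICT_KEY_IS_VALID, DICT_KEY_TYPE and DICT_KEY_N_SUBNODES, and each
# list is capped at MAX_NAMESPACE_ITEMS, with MAX_NAMESPACE_WARNING
# appended when the cap is hit.
#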
def __calc_subnodes(self, expr, r, fForceNames, filter_level, repr_limit, encoding):
snl = []
try:
if isinstance(r, frozenset) or isinstance(r, set):
if len(r) > MAX_SORTABLE_LENGTH:
g = r
else:
g = [i for i in r]
sort(g)
for i in g:
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
rk = repr_ltd(i, REPR_ID_LENGTH, encoding = ENCODING_RAW_I)
e = {}
e[DICT_KEY_EXPR] = as_unicode('_RPDB2_FindRepr((%s), %d)["%s"]' % (expr, REPR_ID_LENGTH, rk.replace('"', '\\"')))
e[DICT_KEY_NAME] = repr_ltd(i, repr_limit, encoding)
e[DICT_KEY_REPR] = repr_ltd(i, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(i)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(i)
snl.append(e)
return snl
except NameError:
pass
if isinstance(r, sets.BaseSet):
if len(r) > MAX_SORTABLE_LENGTH:
g = r
else:
g = [i for i in r]
sort(g)
for i in g:
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
rk = repr_ltd(i, REPR_ID_LENGTH, encoding = ENCODING_RAW_I)
e = {}
e[DICT_KEY_EXPR] = as_unicode('_RPDB2_FindRepr((%s), %d)["%s"]' % (expr, REPR_ID_LENGTH, rk.replace('"', '\\"')))
e[DICT_KEY_NAME] = repr_ltd(i, repr_limit, encoding)
e[DICT_KEY_REPR] = repr_ltd(i, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(i)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(i)
snl.append(e)
return snl
if isinstance(r, list) or isinstance(r, tuple):
for i, v in enumerate(r[0: MAX_NAMESPACE_ITEMS]):
is_valid = [True]
e = {}
e[DICT_KEY_EXPR] = as_unicode('(%s)[%d]' % (expr, i))
e[DICT_KEY_NAME] = as_unicode(repr(i))
e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)
snl.append(e)
if len(r) > MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
return snl
if isinstance(r, dict):
if filter_level == 2 and expr in ['locals()', 'globals()']:
r = copy.copy(r)
for k, v in list(r.items()):
if parse_type(type(v)) in ['function', 'classobj', 'type']:
del r[k]
if len(r) > MAX_SORTABLE_LENGTH:
kl = r
else:
kl = list(r.keys())
sort(kl)
for k in kl:
#
# Remove any trace of session password from data structures that
# go over the network.
#
if k in ['_RPDB2_FindRepr', '_RPDB2_builtins', '_rpdb2_args', '_rpdb2_pwd', 'm_rpdb2_pwd']:
continue
v = r[k]
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
e = {}
if [True for t in [bool, int, float, bytes, str, unicode, type(None)] if t is type(k)]:
rk = repr(k)
if len(rk) < REPR_ID_LENGTH:
e[DICT_KEY_EXPR] = as_unicode('(%s)[%s]' % (expr, rk))
if type(k) is str8:
rk = repr(k)
if len(rk) < REPR_ID_LENGTH:
e[DICT_KEY_EXPR] = as_unicode('(%s)[str8(%s)]' % (expr, rk[1:]))
if not DICT_KEY_EXPR in e:
rk = repr_ltd(k, REPR_ID_LENGTH, encoding = ENCODING_RAW_I)
e[DICT_KEY_EXPR] = as_unicode('_RPDB2_FindRepr((%s), %d)["%s"]' % (expr, REPR_ID_LENGTH, rk.replace('"', '\\"')))
e[DICT_KEY_NAME] = as_unicode([repr_ltd(k, repr_limit, encoding), k][fForceNames])
e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)
snl.append(e)
return snl
al = calc_attribute_list(r, filter_level)
sort(al)
for a in al:
if a == 'm_rpdb2_pwd':
continue
try:
v = getattr(r, a)
except AttributeError:
continue
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
e = {}
e[DICT_KEY_EXPR] = as_unicode('(%s).%s' % (expr, a))
e[DICT_KEY_NAME] = as_unicode(a)
e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)
snl.append(e)
return snl
def get_exception(self, frame_index, fException):
ctx = self.get_current_ctx()
if is_py3k():
exc_info = ctx.get_exc_info()
if exc_info == None:
return {'type': None, 'value': None, 'traceback': None}
type, value, traceback = exc_info
e = {'type': type, 'value': value, 'traceback': traceback}
return e
try:
f = None
base_frame = None
base_frame = ctx.frame_acquire()
(f, frame_lineno) = ctx.get_frame(base_frame, frame_index, fException)
e = {'type': f.f_exc_type, 'value': f.f_exc_value, 'traceback': f.f_exc_traceback}
return e
finally:
f = None
base_frame = None
ctx.frame_release()
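#
# Note: is_child_of_failure() prunes expressions rooted at an
# expression that already failed to evaluate; e.g. if 'a.b' is in
# failed_expr_list, then 'a.b.c' is skipped as well.
#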
def is_child_of_failure(self, failed_expr_list, expr):
for failed_expr in failed_expr_list:
if expr.startswith(failed_expr):
return True
return False
def calc_expr(self, expr, fExpand, filter_level, frame_index, fException, _globals, _locals, lock, event, rl, index, repr_limit, encoding):
e = {}
try:
__globals = _globals
__locals = _locals
if RPDB_EXEC_INFO in expr:
rpdb_exception_info = self.get_exception(frame_index, fException)
__globals = globals()
__locals = locals()
__locals['_RPDB2_FindRepr'] = _RPDB2_FindRepr
is_valid = [True]
r = eval(expr, __globals, __locals)
e[DICT_KEY_EXPR] = as_unicode(expr)
e[DICT_KEY_REPR] = repr_ltd(r, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(r)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(r)
if fExpand and (e[DICT_KEY_N_SUBNODES] > 0):
fForceNames = (expr in ['globals()', 'locals()']) or (RPDB_EXEC_INFO in expr)
e[DICT_KEY_SUBNODES] = self.__calc_subnodes(expr, r, fForceNames, filter_level, repr_limit, encoding)
e[DICT_KEY_N_SUBNODES] = len(e[DICT_KEY_SUBNODES])
except:
print_debug_exception()
e[DICT_KEY_ERROR] = as_unicode(safe_repr(sys.exc_info()))
lock.acquire()
if len(rl) == index:
rl.append(e)
lock.release()
event.set()
def __calc_encoding(self, encoding, fvalidate = False, filename = None):
if encoding != ENCODING_AUTO and not fvalidate:
return encoding
if encoding != ENCODING_AUTO:
try:
codecs.lookup(encoding)
return encoding
except:
pass
if filename == None:
ctx = self.get_current_ctx()
filename = ctx.m_code_context.m_filename
try:
encoding = get_file_encoding(filename)
return encoding
except:
return 'utf-8'
def get_namespace(self, nl, filter_level, frame_index, fException, repr_limit, encoding, fraw):
if fraw:
encoding = ENCODING_RAW_I
else:
encoding = self.__calc_encoding(encoding, fvalidate = True)
try:
(_globals, _locals, x) = self.__get_locals_globals(frame_index, fException, fReadOnly = True)
except:
print_debug_exception()
raise
failed_expr_list = []
rl = []
index = 0
lock = threading.Condition()
for (expr, fExpand) in nl:
if self.is_child_of_failure(failed_expr_list, expr):
continue
event = threading.Event()
args = (expr, fExpand, filter_level, frame_index, fException, _globals, _locals, lock, event, rl, index, repr_limit, encoding)
if self.m_fsynchronicity:
g_server.m_work_queue.post_work_item(target = self.calc_expr, args = args, name = 'calc_expr %s' % expr)
else:
try:
ctx = self.get_current_ctx()
tid = ctx.m_thread_id
send_job(tid, 0, self.calc_expr, *args)
except:
pass
safe_wait(event, 2)
lock.acquire()
if len(rl) == index:
rl.append('error')
failed_expr_list.append(expr)
index += 1
lock.release()
if len(failed_expr_list) > 3:
break
_rl = [r for r in rl if r != 'error']
return _rl
def evaluate(self, expr, frame_index, fException, encoding, fraw):
"""
Evaluate expression in the context of the frame at depth 'frame_index'.
"""
result = [(as_unicode(''), as_unicode(STR_SYNCHRONICITY_BAD), as_unicode(''))]
if self.m_fsynchronicity:
self._evaluate(result, expr, frame_index, fException, encoding, fraw)
else:
try:
ctx = self.get_current_ctx()
tid = ctx.m_thread_id
send_job(tid, 1000, self._evaluate, result, expr, frame_index, fException, encoding, fraw)
except:
pass
return result[-1]
def _evaluate(self, result, expr, frame_index, fException, encoding, fraw):
"""
Evaluate expression in the context of the frame at depth 'frame_index'.
"""
encoding = self.__calc_encoding(encoding)
(_globals, _locals, x) = self.__get_locals_globals(frame_index, fException)
v = ''
w = ''
e = ''
try:
if '_rpdb2_pwd' in expr or '_rpdb2_args' in expr:
r = '...Removed-password-from-output...'
else:
_expr = as_bytes(ENCODING_SOURCE % encoding + expr, encoding, fstrict = True)
if '_RPDB2_builtins' in expr:
_locals['_RPDB2_builtins'] = vars(g_builtins_module)
try:
redirect_exc_info = True
r = eval(_expr, _globals, _locals)
finally:
del redirect_exc_info
if '_RPDB2_builtins' in expr:
del _locals['_RPDB2_builtins']
if fraw:
encoding = ENCODING_RAW_I
v = repr_ltd(r, MAX_EVALUATE_LENGTH, encoding)
if len(v) > MAX_EVALUATE_LENGTH:
v += '... *** %s ***' % STR_MAX_EVALUATE_LENGTH_WARNING
w = STR_MAX_EVALUATE_LENGTH_WARNING
except:
exc_info = sys.exc_info()
e = "%s, %s" % (safe_str(exc_info[0]), safe_str(exc_info[1]))
self.notify_namespace()
result.append((as_unicode(v), as_unicode(w), as_unicode(e)))
def execute(self, suite, frame_index, fException, encoding):
"""
Execute suite (a Python statement) in the context of the frame at
depth 'frame_index'.
"""
result = [(as_unicode(STR_SYNCHRONICITY_BAD), as_unicode(''))]
if self.m_fsynchronicity:
self._execute(result, suite, frame_index, fException, encoding)
else:
try:
ctx = self.get_current_ctx()
tid = ctx.m_thread_id
send_job(tid, 1000, self._execute, result, suite, frame_index, fException, encoding)
except:
pass
return result[-1]
def _execute(self, result, suite, frame_index, fException, encoding):
"""
Execute suite (a Python statement) in the context of the frame at
depth 'frame_index'.
"""
print_debug('exec called with: ' + repr(suite))
encoding = self.__calc_encoding(encoding)
(_globals, _locals, _original_locals_copy) = self.__get_locals_globals(frame_index, fException)
if frame_index > 0 and not _globals is _locals:
_locals_copy = copy.copy(_locals)
w = ''
e = ''
try:
if '_RPDB2_FindRepr' in suite and not '_RPDB2_FindRepr' in _original_locals_copy:
_locals['_RPDB2_FindRepr'] = _RPDB2_FindRepr
try:
_suite = as_bytes(ENCODING_SOURCE % encoding + suite, encoding, fstrict = True)
#print_debug('suite is %s' % repr(_suite))
_code = compile(_suite, '<string>', 'exec')
try:
redirect_exc_info = True
exec(_code, _globals, _locals)
finally:
del redirect_exc_info
finally:
if '_RPDB2_FindRepr' in suite and not '_RPDB2_FindRepr' in _original_locals_copy:
del _locals['_RPDB2_FindRepr']
except:
exc_info = sys.exc_info()
e = "%s, %s" % (safe_str(exc_info[0]), safe_str(exc_info[1]))
if frame_index > 0 and (not _globals is _locals) and _locals != _locals_copy:
l = [(k, safe_repr(v)) for k, v in _locals.items()]
sl = set(l)
lc = [(k, safe_repr(v)) for k, v in _locals_copy.items()]
slc = set(lc)
nsc = [k for (k, v) in sl - slc if k in _original_locals_copy]
if len(nsc) != 0:
w = STR_LOCAL_NAMESPACE_WARNING
self.notify_namespace()
result.append((as_unicode(w), as_unicode(e)))
def __decode_thread_name(self, name):
name = as_unicode(name)
return name
def get_thread_list(self):
"""
Return thread list with tid, state, and last event of each thread.
"""
ctx = self.get_current_ctx()
if ctx is None:
current_thread_id = -1
else:
current_thread_id = ctx.m_thread_id
ctx_list = list(self.get_threads().values())
tl = []
for c in ctx_list:
d = {}
d[DICT_KEY_TID] = c.m_thread_id
d[DICT_KEY_NAME] = self.__decode_thread_name(c.m_thread_name)
d[DICT_KEY_BROKEN] = c.m_fBroken
d[DICT_KEY_EVENT] = as_unicode(c.m_event)
tl.append(d)
return (current_thread_id, tl)
def stop_debuggee(self):
"""
Notify the client and terminate this process.
"""
g_server.m_work_queue.post_work_item(target = _atexit, args = (True, ), name = '_atexit')
def set_synchronicity(self, fsynchronicity):
self.m_fsynchronicity = fsynchronicity
event = CEventSynchronicity(fsynchronicity)
self.m_event_dispatcher.fire_event(event)
if self.m_state_manager.get_state() == STATE_BROKEN:
self.notify_namespace()
def set_trap_unhandled_exceptions(self, ftrap):
self.m_ftrap = ftrap
event = CEventTrap(ftrap)
self.m_event_dispatcher.fire_event(event)
def is_unhandled_exception(self):
return self.m_fUnhandledException
def set_fork_mode(self, ffork_into_child, ffork_auto):
self.m_ffork_into_child = ffork_into_child
self.m_ffork_auto = ffork_auto
event = CEventForkMode(ffork_into_child, ffork_auto)
self.m_event_dispatcher.fire_event(event)
def set_environ(self, envmap):
global g_fignorefork
print_debug('Entered set_environ() with envmap = %s' % repr(envmap))
if len(envmap) == 0:
return
old_pythonpath = os.environ.get('PYTHONPATH', '')
encoding = detect_locale()
for k, v in envmap:
try:
k = as_string(k, encoding, fstrict = True)
v = as_string(v, encoding, fstrict = True)
except:
continue
command = 'echo %s' % v
try:
g_fignorefork = True
f = platform.popen(command)
finally:
g_fignorefork = False
value = f.read()
f.close()
if value[-1:] == '\n':
value = value[:-1]
os.environ[k] = value
if 'PYTHONPATH' in [k for (k, v) in envmap]:
recalc_sys_path(old_pythonpath)
#
# ------------------------------------- RPC Server --------------------------------------------
#
class CWorkQueue:
"""
Pool of worker threads for the RPC server.
"""
def __init__(self, size = N_WORK_QUEUE_THREADS):
self.m_lock = threading.Condition()
self.m_work_items = []
self.m_f_shutdown = False
self.m_size = size
self.m_n_threads = 0
self.m_n_available = 0
self.__create_thread()
def __create_thread(self):
t = CThread(name = '__worker_target', target = self.__worker_target, shutdown = self.shutdown)
#thread_set_daemon(t, True)
t.start()
def shutdown(self):
"""
Signal worker threads to exit, and wait until they do.
"""
if self.m_f_shutdown:
return
print_debug('Shutting down worker queue...')
self.m_lock.acquire()
self.m_f_shutdown = True
lock_notify_all(self.m_lock)
t0 = time.time()
while self.m_n_threads > 0:
if time.time() - t0 > SHUTDOWN_TIMEOUT:
self.m_lock.release()
print_debug('Shut down of worker queue has TIMED OUT!')
return
safe_wait(self.m_lock, 0.1)
self.m_lock.release()
print_debug('Shutting down worker queue, done.')
def __worker_target(self):
try:
self.m_lock.acquire()
self.m_n_threads += 1
self.m_n_available += 1
fcreate_thread = not self.m_f_shutdown and self.m_n_threads < self.m_size
self.m_lock.release()
if fcreate_thread:
self.__create_thread()
self.m_lock.acquire()
while not self.m_f_shutdown:
safe_wait(self.m_lock)
if self.m_f_shutdown:
break
if len(self.m_work_items) == 0:
continue
fcreate_thread = self.m_n_available == 1
(target, args, name) = self.m_work_items.pop()
self.m_n_available -= 1
self.m_lock.release()
if fcreate_thread:
print_debug('Creating an extra worker thread.')
self.__create_thread()
thread_set_name(current_thread(), '__worker_target - ' + name)
try:
target(*args)
except:
print_debug_exception()
thread_set_name(current_thread(), '__worker_target')
self.m_lock.acquire()
self.m_n_available += 1
if self.m_n_available > self.m_size:
break
self.m_n_threads -= 1
self.m_n_available -= 1
lock_notify_all(self.m_lock)
finally:
self.m_lock.release()
def post_work_item(self, target, args, name = ''):
if self.m_f_shutdown:
return
try:
self.m_lock.acquire()
if self.m_f_shutdown:
return
self.m_work_items.append((target, args, name))
self.m_lock.notify()
finally:
self.m_lock.release()
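#
# Typical use of the queue (a sketch; 'handle' and 'request' are
# illustrative names):
#
#   queue = CWorkQueue()
#   queue.post_work_item(target = handle, args = (request, ), name = 'handle')
#   ...
#   queue.shutdown()
#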
#
# MOD
#
class CUnTracedThreadingMixIn(SocketServer.ThreadingMixIn):
"""
Modification of SocketServer.ThreadingMixIn that uses a worker-thread
queue instead of spawning a new thread per request.
This mod was needed to resolve deadlocks that arose in some
circumstances.
"""
def process_request(self, request, client_address):
g_server.m_work_queue.post_work_item(target = SocketServer.ThreadingMixIn.process_request_thread, args = (self, request, client_address), name = 'process_request')
#
# MOD
#
def my_xmlrpclib_loads(data):
"""
Modification of Python 2.3 xmlrpclib.loads() that does not do an
import. Needed to prevent deadlocks.
"""
p, u = xmlrpclib.getparser()
p.feed(data)
p.close()
return u.close(), u.getmethodname()
#
# MOD
#
class CXMLRPCServer(CUnTracedThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer):
"""
Modification of Python 2.3 SimpleXMLRPCServer.SimpleXMLRPCDispatcher
that uses my_xmlrpclib_loads(). Needed to prevent deadlocks.
"""
if os.name == POSIX:
allow_reuse_address = True
else:
allow_reuse_address = False
def __marshaled_dispatch(self, data, dispatch_method = None):
params, method = my_xmlrpclib_loads(data)
# generate response
try:
if dispatch_method is not None:
response = dispatch_method(method, params)
else:
response = self._dispatch(method, params)
# wrap response in a singleton tuple
response = (response,)
response = xmlrpclib.dumps(response, methodresponse=1)
except xmlrpclib.Fault:
fault = sys.exc_info()[1]
response = xmlrpclib.dumps(fault)
except:
# report exception back to server
response = xmlrpclib.dumps(
xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value))
)
print_debug_exception()
return response
if sys.version_info[:2] <= (2, 3):
_marshaled_dispatch = __marshaled_dispatch
#def server_activate(self):
# self.socket.listen(1)
def handle_error(self, request, client_address):
print_debug("handle_error() in pid %d" % _getpid())
if g_ignore_broken_pipe + 5 > time.time():
return
return SimpleXMLRPCServer.SimpleXMLRPCServer.handle_error(self, request, client_address)
class CPwdServerProxy:
"""
Encrypted proxy to the debuggee.
Works by wrapping an xmlrpclib.ServerProxy object.
"""
def __init__(self, crypto, uri, transport = None, target_rid = 0):
self.m_crypto = crypto
self.m_proxy = xmlrpclib.ServerProxy(uri, transport)
self.m_fEncryption = is_encryption_supported()
self.m_target_rid = target_rid
self.m_method = getattr(self.m_proxy, DISPACHER_METHOD)
def __set_encryption(self, fEncryption):
self.m_fEncryption = fEncryption
def get_encryption(self):
return self.m_fEncryption
def __request(self, name, params):
"""
Call debuggee method 'name' with parameters 'params'.
"""
while True:
try:
#
# Encrypt method and params.
#
fencrypt = self.get_encryption()
args = (as_unicode(name), params, self.m_target_rid)
(fcompress, digest, msg) = self.m_crypto.do_crypto(args, fencrypt)
rpdb_version = as_unicode(get_interface_compatibility_version())
r = self.m_method(rpdb_version, fencrypt, fcompress, digest, msg)
(fencrypt, fcompress, digest, msg) = r
#
# Decrypt response.
#
((max_index, _r, _e), id) = self.m_crypto.undo_crypto(fencrypt, fcompress, digest, msg, fVerifyIndex = False)
if _e is not None:
raise _e
except AuthenticationBadIndex:
e = sys.exc_info()[1]
self.m_crypto.set_index(e.m_max_index, e.m_anchor)
continue
except xmlrpclib.Fault:
fault = sys.exc_info()[1]
if class_name(BadVersion) in fault.faultString:
s = fault.faultString.split("'")
if len(s) > 1:
version = s[1]
else:
version = ''
raise BadVersion(version)
if class_name(EncryptionExpected) in fault.faultString:
raise EncryptionExpected
elif class_name(EncryptionNotSupported) in fault.faultString:
if self.m_crypto.m_fAllowUnencrypted:
self.__set_encryption(False)
continue
raise EncryptionNotSupported
elif class_name(DecryptionFailure) in fault.faultString:
raise DecryptionFailure
elif class_name(AuthenticationBadData) in fault.faultString:
raise AuthenticationBadData
elif class_name(AuthenticationFailure) in fault.faultString:
raise AuthenticationFailure
else:
print_debug_exception()
assert False
except xmlrpclib.ProtocolError:
print_debug("Caught ProtocolError for %s" % name)
#print_debug_exception()
raise CConnectionException
return _r
def __getattr__(self, name):
return xmlrpclib._Method(self.__request, name)
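#
# Any attribute access on the proxy yields a callable routed through
# __request(); e.g. (a sketch):
#
#   proxy = CPwdServerProxy(crypto, calcURL(host, port), CTimeoutTransport())
#   proxy.null()    # invokes the debuggee's export_null() via the dispatcher
#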
class CIOServer:
"""
Base class for debuggee server.
"""
def __init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid):
assert(is_unicode(_rpdb2_pwd))
assert(is_unicode(rid))
self.m_thread = None
self.m_crypto = CCrypto(_rpdb2_pwd, fAllowUnencrypted, rid)
self.m_fAllowRemote = fAllowRemote
self.m_rid = rid
self.m_port = None
self.m_stop = False
self.m_server = None
self.m_work_queue = None
def shutdown(self):
self.stop()
def start(self):
self.m_thread = CThread(name = 'ioserver', target = self.run, shutdown = self.shutdown)
thread_set_daemon(self.m_thread, True)
self.m_thread.start()
def jumpstart(self):
self.m_stop = False
self.start()
def stop(self):
if self.m_stop:
return
print_debug('Stopping IO server... (pid = %d)' % _getpid())
self.m_stop = True
while thread_is_alive(self.m_thread):
try:
proxy = CPwdServerProxy(self.m_crypto, calcURL(LOOPBACK, self.m_port), CLocalTimeoutTransport())
proxy.null()
except (socket.error, CException):
pass
self.m_thread.join(0.5)
self.m_thread = None
self.m_work_queue.shutdown()
#try:
# self.m_server.socket.close()
#except:
# pass
print_debug('Stopping IO server, done.')
def export_null(self):
return 0
def run(self):
if self.m_server == None:
(self.m_port, self.m_server) = self.__StartXMLRPCServer()
self.m_work_queue = CWorkQueue()
self.m_server.register_function(self.dispatcher_method)
while not self.m_stop:
self.m_server.handle_request()
def dispatcher_method(self, rpdb_version, fencrypt, fcompress, digest, msg):
"""
Process RPC call.
"""
#print_debug('dispatcher_method() called with: %s, %s, %s, %s' % (rpdb_version, fencrypt, digest, msg[:100]))
if rpdb_version != as_unicode(get_interface_compatibility_version()):
raise BadVersion(as_unicode(get_version()))
try:
try:
#
# Decrypt parameters.
#
((name, __params, target_rid), client_id) = self.m_crypto.undo_crypto(fencrypt, fcompress, digest, msg)
except AuthenticationBadIndex:
e = sys.exc_info()[1]
#print_debug_exception()
#
# Notify the caller on the expected index.
#
max_index = self.m_crypto.get_max_index()
args = (max_index, None, e)
(fcompress, digest, msg) = self.m_crypto.do_crypto(args, fencrypt)
return (fencrypt, fcompress, digest, msg)
r = None
e = None
try:
#
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
#
func = getattr(self, 'export_' + name)
except AttributeError:
raise Exception('method "%s" is not supported' % ('export_' + name))
try:
if (target_rid != 0) and (target_rid != self.m_rid):
raise NotAttached
#
# Record that client id is still attached.
#
self.record_client_heartbeat(client_id, name, __params)
r = func(*__params)
except Exception:
_e = sys.exc_info()[1]
print_debug_exception()
e = _e
#
# Send the encrypted result.
#
max_index = self.m_crypto.get_max_index()
args = (max_index, r, e)
(fcompress, digest, msg) = self.m_crypto.do_crypto(args, fencrypt)
return (fencrypt, fcompress, digest, msg)
except:
print_debug_exception()
raise
def __StartXMLRPCServer(self):
"""
As the name says, start the XML-RPC server.
It scans for an available TCP port to listen on.
"""
host = [LOOPBACK, ""][self.m_fAllowRemote]
port = SERVER_PORT_RANGE_START
while True:
try:
server = CXMLRPCServer((host, port), logRequests = 0)
return (port, server)
except socket.error:
e = sys.exc_info()[1]
if GetSocketError(e) != errno.EADDRINUSE:
raise
if port >= SERVER_PORT_RANGE_START + SERVER_PORT_RANGE_LENGTH - 1:
raise
port += 1
continue
def record_client_heartbeat(self, id, name, params):
pass
class CServerInfo(object):
def __init__(self, age, port, pid, filename, rid, state, fembedded):
assert(is_unicode(rid))
self.m_age = age
self.m_port = port
self.m_pid = pid
self.m_filename = as_unicode(filename, sys.getfilesystemencoding())
self.m_module_name = as_unicode(CalcModuleName(filename), sys.getfilesystemencoding())
self.m_rid = rid
self.m_state = as_unicode(state)
self.m_fembedded = fembedded
def __reduce__(self):
rv = (copy_reg.__newobj__, (type(self), ), vars(self), None, None)
return rv
def __str__(self):
return 'age: %d, port: %d, pid: %d, filename: %s, rid: %s' % (self.m_age, self.m_port, self.m_pid, self.m_filename, self.m_rid)
class CDebuggeeServer(CIOServer):
"""
The debuggee XML RPC server class.
"""
def __init__(self, filename, debugger, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid = None):
if rid is None:
rid = generate_rid()
assert(is_unicode(_rpdb2_pwd))
assert(is_unicode(rid))
CIOServer.__init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid)
self.m_filename = filename
self.m_pid = _getpid()
self.m_time = time.time()
self.m_debugger = debugger
self.m_rid = rid
def shutdown(self):
CIOServer.shutdown(self)
def record_client_heartbeat(self, id, name, params):
finit = (name == 'request_break')
fdetach = (name == 'request_go' and True in params)
self.m_debugger.record_client_heartbeat(id, finit, fdetach)
def export_null(self):
return self.m_debugger.send_event_null()
def export_server_info(self):
age = time.time() - self.m_time
state = self.m_debugger.get_state()
fembedded = self.m_debugger.is_embedded()
si = CServerInfo(age, self.m_port, self.m_pid, self.m_filename, self.m_rid, state, fembedded)
return si
def export_sync_with_events(self, fException, fSendUnhandled):
ei = self.m_debugger.sync_with_events(fException, fSendUnhandled)
return ei
def export_wait_for_event(self, timeout, event_index):
(new_event_index, s) = self.m_debugger.wait_for_event(timeout, event_index)
return (new_event_index, s)
def export_set_breakpoint(self, filename, scope, lineno, fEnabled, expr, frame_index, fException, encoding):
self.m_debugger.set_breakpoint(filename, scope, lineno, fEnabled, expr, frame_index, fException, encoding)
return 0
def export_disable_breakpoint(self, id_list, fAll):
self.m_debugger.disable_breakpoint(id_list, fAll)
return 0
def export_enable_breakpoint(self, id_list, fAll):
self.m_debugger.enable_breakpoint(id_list, fAll)
return 0
def export_delete_breakpoint(self, id_list, fAll):
self.m_debugger.delete_breakpoint(id_list, fAll)
return 0
def export_get_breakpoints(self):
bpl = self.m_debugger.get_breakpoints()
return bpl
def export_request_break(self):
self.m_debugger.request_break()
return 0
def export_request_go(self, fdetach = False):
self.m_debugger.request_go()
return 0
def export_request_go_breakpoint(self, filename, scope, lineno, frame_index, fException):
self.m_debugger.request_go_breakpoint(filename, scope, lineno, frame_index, fException)
return 0
def export_request_step(self):
self.m_debugger.request_step()
return 0
def export_request_next(self):
self.m_debugger.request_next()
return 0
def export_request_return(self):
self.m_debugger.request_return()
return 0
def export_request_jump(self, lineno):
self.m_debugger.request_jump(lineno)
return 0
def export_get_stack(self, tid_list, fAll, fException):
r = self.m_debugger.get_stack(tid_list, fAll, fException)
return r
def export_get_source_file(self, filename, lineno, nlines, frame_index, fException):
r = self.m_debugger.get_source_file(filename, lineno, nlines, frame_index, fException)
return r
def export_get_source_lines(self, nlines, fAll, frame_index, fException):
r = self.m_debugger.get_source_lines(nlines, fAll, frame_index, fException)
return r
def export_get_thread_list(self):
r = self.m_debugger.get_thread_list()
return r
def export_set_thread(self, tid):
self.m_debugger.set_thread(tid)
return 0
def export_get_namespace(self, nl, filter_level, frame_index, fException, repr_limit, encoding, fraw):
r = self.m_debugger.get_namespace(nl, filter_level, frame_index, fException, repr_limit, encoding, fraw)
return r
def export_evaluate(self, expr, frame_index, fException, encoding, fraw):
(v, w, e) = self.m_debugger.evaluate(expr, frame_index, fException, encoding, fraw)
return (v, w, e)
def export_execute(self, suite, frame_index, fException, encoding):
(w, e) = self.m_debugger.execute(suite, frame_index, fException, encoding)
return (w, e)
def export_stop_debuggee(self):
self.m_debugger.stop_debuggee()
return 0
def export_set_synchronicity(self, fsynchronicity):
self.m_debugger.set_synchronicity(fsynchronicity)
return 0
def export_set_trap_unhandled_exceptions(self, ftrap):
self.m_debugger.set_trap_unhandled_exceptions(ftrap)
return 0
def export_is_unhandled_exception(self):
return self.m_debugger.is_unhandled_exception()
def export_set_fork_mode(self, ffork_into_child, ffork_auto):
self.m_debugger.set_fork_mode(ffork_into_child, ffork_auto)
return 0
def export_set_environ(self, envmap):
self.m_debugger.set_environ(envmap)
return 0
def export_embedded_sync(self):
self.m_debugger.embedded_sync()
return 0
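#
# Note: each export_* method above is reachable from the client side as
# getSession().getProxy().<name>(); e.g. export_request_break() is what
# CSessionManagerInternal.request_break() ends up invoking.
#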
#
# ------------------------------------- RPC Client --------------------------------------------
#
#
# MOD
#
class CTimeoutHTTPConnection(httplib.HTTPConnection):
"""
Modification of httplib.HTTPConnection with timeout for sockets.
"""
_rpdb2_timeout = PING_TIMEOUT
def connect(self):
"""Connect to the host and port specified in __init__."""
# New Python version of connect().
if hasattr(self, 'timeout'):
self.timeout = self._rpdb2_timeout
return httplib.HTTPConnection.connect(self)
# Old Python version of connect().
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.settimeout(self._rpdb2_timeout)
if self.debuglevel > 0:
print_debug("connect: (%s, %s)" % (self.host, self.port))
self.sock.connect(sa)
except socket.error:
msg = sys.exc_info()[1]
if self.debuglevel > 0:
print_debug('connect fail: ' + repr((self.host, self.port)))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error(msg)
#
# MOD
#
class CLocalTimeoutHTTPConnection(CTimeoutHTTPConnection):
"""
Modification of httplib.HTTPConnection with timeout for sockets.
"""
_rpdb2_timeout = LOCAL_TIMEOUT
if is_py3k():
class httplib_HTTP(object):
pass
else:
httplib_HTTP = httplib.HTTP
#
# MOD
#
class CTimeoutHTTP(httplib_HTTP):
"""
Modification of httplib.HTTP with timeout for sockets.
"""
_connection_class = CTimeoutHTTPConnection
#
# MOD
#
class CLocalTimeoutHTTP(httplib_HTTP):
"""
Modification of httplib.HTTP with timeout for sockets.
"""
_connection_class = CLocalTimeoutHTTPConnection
#
# MOD
#
class CLocalTransport(xmlrpclib.Transport):
"""
Modification of xmlrpclib.Transport to work around a ZoneAlarm
sockets bug.
"""
_connection_class = httplib.HTTPConnection
_connection_class_old = httplib_HTTP
def make_connection(self, host):
# New Python version of connect().
# However, make_connection is hacked to always create a new connection,
# since otherwise all threads would share a single connection and crash.
if hasattr(self, '_connection'):
chost, self._extra_headers, x509 = self.get_host_info(host)
return self._connection_class(chost)
# Old Python version of connect().
# create a HTTP connection object from a host descriptor
host, extra_headers, x509 = self.get_host_info(host)
return self._connection_class_old(host)
def __parse_response(self, file, sock):
# read response from input file/socket, and parse it
p, u = self.getparser()
while 1:
if sock:
response = sock.recv(1024)
else:
time.sleep(0.002)
response = file.read(1024)
if not response:
break
if self.verbose:
_print("body: " + repr(response))
p.feed(response)
file.close()
p.close()
return u.close()
if os.name == 'nt':
_parse_response = __parse_response
#
# MOD
#
class CTimeoutTransport(CLocalTransport):
"""
Modification of xmlrpclib.Transport with timeout for sockets.
"""
_connection_class = CTimeoutHTTPConnection
_connection_class_old = CTimeoutHTTP
#
# MOD
#
class CLocalTimeoutTransport(CLocalTransport):
"""
Modification of xmlrpclib.Transport with timeout for sockets.
"""
_connection_class = CLocalTimeoutHTTPConnection
_connection_class_old = CLocalTimeoutHTTP
class CSession:
"""
Basic class that communicates with the debuggee server.
"""
def __init__(self, host, port, _rpdb2_pwd, fAllowUnencrypted, rid):
self.m_crypto = CCrypto(_rpdb2_pwd, fAllowUnencrypted, rid)
self.m_host = host
self.m_port = port
self.m_proxy = None
self.m_server_info = None
self.m_exc_info = None
self.m_fShutDown = False
self.m_fRestart = False
def get_encryption(self):
return self.m_proxy.get_encryption()
def getServerInfo(self):
return self.m_server_info
def pause(self):
self.m_fRestart = True
def restart(self, sleep = 0, timeout = 10):
self.m_fRestart = True
time.sleep(sleep)
t0 = time.time()
try:
try:
while time.time() < t0 + timeout:
try:
self.Connect()
return
except socket.error:
continue
raise CConnectionException
except:
self.m_fShutDown = True
raise
finally:
self.m_fRestart = False
def shut_down(self):
self.m_fShutDown = True
def getProxy(self):
"""
Return the proxy object.
With this object you can invoke methods on the server.
"""
while self.m_fRestart:
time.sleep(0.1)
if self.m_fShutDown:
raise NotAttached
return self.m_proxy
def ConnectAsync(self):
t = threading.Thread(target = self.ConnectNoThrow)
#thread_set_daemon(t, True)
t.start()
return t
def ConnectNoThrow(self):
try:
self.Connect()
except:
self.m_exc_info = sys.exc_info()
def Connect(self):
host = self.m_host
if host.lower() == LOCALHOST:
host = LOOPBACK
server = CPwdServerProxy(self.m_crypto, calcURL(host, self.m_port), CTimeoutTransport())
server_info = server.server_info()
self.m_proxy = CPwdServerProxy(self.m_crypto, calcURL(host, self.m_port), CLocalTransport(), target_rid = server_info.m_rid)
self.m_server_info = server_info
def isConnected(self):
return self.m_proxy is not None
class CServerList:
def __init__(self, host):
self.m_host = host
self.m_list = []
self.m_errors = {}
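#
# Note: calcList() below probes every port in the range
# SERVER_PORT_RANGE_START .. SERVER_PORT_RANGE_START +
# SERVER_PORT_RANGE_LENGTH - 1 concurrently, one CSession per port, and
# keeps the servers that answered, sorted by age.
#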
def calcList(self, _rpdb2_pwd, rid, key = None):
sil = []
sessions = []
self.m_errors = {}
port = SERVER_PORT_RANGE_START
while port < SERVER_PORT_RANGE_START + SERVER_PORT_RANGE_LENGTH:
s = CSession(self.m_host, port, _rpdb2_pwd, fAllowUnencrypted = True, rid = rid)
t = s.ConnectAsync()
sessions.append((s, t))
port += 1
for (s, t) in sessions:
t.join()
if (s.m_exc_info is not None):
if not issubclass(s.m_exc_info[0], socket.error):
self.m_errors.setdefault(s.m_exc_info[0], []).append(s.m_exc_info)
continue
si = s.getServerInfo()
if si is not None:
sil.append((-si.m_age, si))
sil.sort()
self.m_list = [s[1] for s in sil]
if key != None:
try:
return self.findServers(key)[0]
except:
pass
if key != None:
raise UnknownServer
return self.m_list
def get_errors(self):
return self.m_errors
def findServers(self, key):
try:
n = int(key)
_s = [s for s in self.m_list if (s.m_pid == n) or (s.m_rid == key)]
except ValueError:
key = as_string(key, sys.getfilesystemencoding())
_s = [s for s in self.m_list if key in s.m_filename]
if _s == []:
raise UnknownServer
return _s
class CSessionManagerInternal:
def __init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host):
self.m_rpdb2_pwd = [_rpdb2_pwd, None][_rpdb2_pwd in [None, '']]
self.m_fAllowUnencrypted = fAllowUnencrypted
self.m_fAllowRemote = fAllowRemote
self.m_rid = generate_rid()
self.m_host = host
self.m_server_list_object = CServerList(host)
self.m_session = None
self.m_server_info = None
self.m_worker_thread = None
self.m_worker_thread_ident = None
self.m_fStop = False
self.m_stack_depth = None
self.m_stack_depth_exception = None
self.m_frame_index = 0
self.m_frame_index_exception = 0
self.m_completions = {}
self.m_remote_event_index = 0
self.m_event_dispatcher_proxy = CEventDispatcher()
self.m_event_dispatcher = CEventDispatcher(self.m_event_dispatcher_proxy)
self.m_state_manager = CStateManager(STATE_DETACHED, self.m_event_dispatcher, self.m_event_dispatcher_proxy)
self.m_breakpoints_proxy = CBreakPointsManagerProxy(self)
event_type_dict = {CEventState: {EVENT_EXCLUDE: [STATE_BROKEN, STATE_ANALYZE]}}
self.register_callback(self.reset_frame_indexes, event_type_dict, fSingleUse = False)
event_type_dict = {CEventStackDepth: {}}
self.register_callback(self.set_stack_depth, event_type_dict, fSingleUse = False)
event_type_dict = {CEventNoThreads: {}}
self.register_callback(self._reset_frame_indexes, event_type_dict, fSingleUse = False)
event_type_dict = {CEventExit: {}}
self.register_callback(self.on_event_exit, event_type_dict, fSingleUse = False)
event_type_dict = {CEventConflictingModules: {}}
self.register_callback(self.on_event_conflicting_modules, event_type_dict, fSingleUse = False)
event_type_dict = {CEventSignalIntercepted: {}}
self.register_callback(self.on_event_signal_intercept, event_type_dict, fSingleUse = False)
event_type_dict = {CEventSignalException: {}}
self.register_callback(self.on_event_signal_exception, event_type_dict, fSingleUse = False)
event_type_dict = {CEventEmbeddedSync: {}}
self.register_callback(self.on_event_embedded_sync, event_type_dict, fSingleUse = False)
event_type_dict = {CEventSynchronicity: {}}
self.m_event_dispatcher_proxy.register_callback(self.on_event_synchronicity, event_type_dict, fSingleUse = False)
self.m_event_dispatcher.register_chain_override(event_type_dict)
event_type_dict = {CEventTrap: {}}
self.m_event_dispatcher_proxy.register_callback(self.on_event_trap, event_type_dict, fSingleUse = False)
self.m_event_dispatcher.register_chain_override(event_type_dict)
event_type_dict = {CEventForkMode: {}}
self.m_event_dispatcher_proxy.register_callback(self.on_event_fork_mode, event_type_dict, fSingleUse = False)
self.m_event_dispatcher.register_chain_override(event_type_dict)
self.m_printer = self.__nul_printer
self.m_last_command_line = None
self.m_last_fchdir = None
self.m_fsynchronicity = True
self.m_ftrap = True
self.m_ffork_into_child = False
self.m_ffork_auto = False
self.m_environment = []
self.m_encoding = ENCODING_AUTO
self.m_fraw = False
def shutdown(self):
self.m_event_dispatcher_proxy.shutdown()
self.m_event_dispatcher.shutdown()
self.m_state_manager.shutdown()
def __nul_printer(self, _str):
pass
def set_printer(self, printer):
self.m_printer = printer
def register_callback(self, callback, event_type_dict, fSingleUse):
return self.m_event_dispatcher.register_callback(callback, event_type_dict, fSingleUse)
def remove_callback(self, callback):
return self.m_event_dispatcher.remove_callback(callback)
def __wait_for_debuggee(self, rid):
try:
time.sleep(STARTUP_TIMEOUT / 2)
for i in range(STARTUP_RETRIES):
try:
print_debug('Scanning for debuggee...')
t0 = time.time()
return self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid, rid)
except UnknownServer:
dt = time.time() - t0
if dt < STARTUP_TIMEOUT:
time.sleep(STARTUP_TIMEOUT - dt)
continue
return self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid, rid)
finally:
errors = self.m_server_list_object.get_errors()
self.__report_server_errors(errors, fsupress_pwd_warning = True)
def get_encryption(self):
return self.getSession().get_encryption()
def launch(self, fchdir, command_line, fload_breakpoints = True):
assert(is_unicode(command_line))
self.__verify_unattached()
if not os.name in [POSIX, 'nt']:
self.m_printer(STR_SPAWN_UNSUPPORTED)
raise SpawnUnsupported
if g_fFirewallTest:
firewall_test = CFirewallTest(self.get_remote())
if not firewall_test.run():
raise FirewallBlock
else:
print_debug('Skipping firewall test.')
if self.m_rpdb2_pwd is None:
self.set_random_password()
if command_line == '':
raise BadArgument
(path, filename, args) = split_command_line_path_filename_args(command_line)
#if not IsPythonSourceFile(filename):
# raise NotPythonSource
_filename = my_os_path_join(path, filename)
ExpandedFilename = FindFile(_filename)
self.set_host(LOCALHOST)
self.m_printer(STR_STARTUP_SPAWN_NOTICE)
rid = generate_rid()
create_pwd_file(rid, self.m_rpdb2_pwd)
self.m_state_manager.set_state(STATE_SPAWNING)
try:
try:
self._spawn_server(fchdir, ExpandedFilename, args, rid)
server = self.__wait_for_debuggee(rid)
self.attach(server.m_rid, server.m_filename, fsupress_pwd_warning = True, fsetenv = True, ffirewall_test = False, server = server, fload_breakpoints = fload_breakpoints)
self.m_last_command_line = command_line
self.m_last_fchdir = fchdir
except:
if self.m_state_manager.get_state() != STATE_DETACHED:
self.m_state_manager.set_state(STATE_DETACHED)
raise
finally:
delete_pwd_file(rid)
def restart(self):
"""
Restart the debug session with the same command_line and fchdir
arguments that were used in the last launch.
"""
if None in (self.m_last_fchdir, self.m_last_command_line):
return
if self.m_state_manager.get_state() != STATE_DETACHED:
self.stop_debuggee()
self.launch(self.m_last_fchdir, self.m_last_command_line)
def get_launch_args(self):
"""
Return the command_line and fchdir arguments that were used in the
last launch, as (last_fchdir, last_command_line).
Returns (None, None) if there is no info.
"""
if None in (self.m_last_fchdir, self.m_last_command_line):
return (None, None)
return (self.m_last_fchdir, self.m_last_command_line)
def _spawn_server(self, fchdir, ExpandedFilename, args, rid):
"""
Start an OS console to act as the server.
In practice this starts rpdb2 again, in a new console, in server-only mode.
"""
if g_fScreen:
name = SCREEN
elif sys.platform == DARWIN:
name = DARWIN
else:
try:
import terminalcommand
name = MAC
except:
name = os.name
if name == 'nt' and g_fDebug:
name = NT_DEBUG
e = ['', ' --encrypt'][not self.m_fAllowUnencrypted]
r = ['', ' --remote'][self.m_fAllowRemote]
c = ['', ' --chdir'][fchdir]
p = ['', ' --pwd="%s"' % self.m_rpdb2_pwd][os.name == 'nt']
b = ''
encoding = detect_locale()
fse = sys.getfilesystemencoding()
ExpandedFilename = g_found_unicode_files.get(ExpandedFilename, ExpandedFilename)
ExpandedFilename = as_unicode(ExpandedFilename, fse)
if as_bytes('?') in as_bytes(ExpandedFilename, encoding, fstrict = False):
_u = as_bytes(ExpandedFilename)
_b = base64.encodestring(_u)
_b = _b.strip(as_bytes('\n')).translate(g_safe_base64_to)
_b = as_string(_b, fstrict = True)
b = ' --base64=%s' % _b
debugger = os.path.abspath(__file__)
if debugger[-1:] == 'c':
debugger = debugger[:-1]
debugger = as_unicode(debugger, fse)
debug_prints = ['', ' --debug'][g_fDebug]
options = '"%s"%s --debugee%s%s%s%s%s --rid=%s "%s" %s' % (debugger, debug_prints, p, e, r, c, b, rid, ExpandedFilename, args)
python_exec = sys.executable
if python_exec.endswith('w.exe'):
python_exec = python_exec[:-5] + '.exe'
python_exec = as_unicode(python_exec, fse)
if as_bytes('?') in as_bytes(python_exec + debugger, encoding, fstrict = False):
raise BadMBCSPath
if name == POSIX:
shell = CalcUserShell()
terminal_command = CalcTerminalCommand()
if terminal_command in osSpawn:
command = osSpawn[terminal_command] % {'shell': shell, 'exec': python_exec, 'options': options}
else:
command = osSpawn[name] % {'term': terminal_command, 'shell': shell, 'exec': python_exec, 'options': options}
else:
command = osSpawn[name] % {'exec': python_exec, 'options': options}
if name == DARWIN:
s = 'cd "%s" ; %s' % (getcwdu(), command)
command = CalcMacTerminalCommand(s)
print_debug('Terminal open string: %s' % repr(command))
command = as_string(command, encoding)
if name == MAC:
terminalcommand.run(command)
else:
subprocess.Popen(command, shell=True)
def attach(self, key, name = None, fsupress_pwd_warning = False, fsetenv = False, ffirewall_test = True, server = None, fload_breakpoints = True):
assert(is_unicode(key))
self.__verify_unattached()
if key == '':
raise BadArgument
if self.m_rpdb2_pwd is None:
#self.m_printer(STR_PASSWORD_MUST_BE_SET)
raise UnsetPassword
if g_fFirewallTest and ffirewall_test:
firewall_test = CFirewallTest(self.get_remote())
if not firewall_test.run():
raise FirewallBlock
elif not g_fFirewallTest and ffirewall_test:
print_debug('Skipping firewall test.')
if name is None:
name = key
_name = name
self.m_printer(STR_STARTUP_NOTICE)
self.m_state_manager.set_state(STATE_ATTACHING)
try:
servers = [server]
if server == None:
self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid)
servers = self.m_server_list_object.findServers(key)
server = servers[0]
_name = server.m_filename
errors = self.m_server_list_object.get_errors()
if not key in [server.m_rid, str(server.m_pid)]:
self.__report_server_errors(errors, fsupress_pwd_warning)
self.__attach(server, fsetenv)
if len(servers) > 1:
self.m_printer(STR_MULTIPLE_DEBUGGEES % key)
self.m_printer(STR_ATTACH_CRYPTO_MODE % ([' ' + STR_ATTACH_CRYPTO_MODE_NOT, ''][self.get_encryption()]))
self.m_printer(STR_ATTACH_SUCCEEDED % server.m_filename)
try:
if fload_breakpoints:
self.load_breakpoints()
except:
pass
except (socket.error, CConnectionException):
self.m_printer(STR_ATTACH_FAILED_NAME % _name)
self.m_state_manager.set_state(STATE_DETACHED)
raise
except:
print_debug_exception()
assert False
def report_exception(self, _type, value, tb):
msg = g_error_mapping.get(_type, STR_ERROR_OTHER)
if _type == SpawnUnsupported and os.name == POSIX and not g_fScreen and g_fDefaultStd:
msg += ' ' + STR_SPAWN_UNSUPPORTED_SCREEN_SUFFIX
if _type == UnknownServer and os.name == POSIX and not g_fScreen and g_fDefaultStd:
msg += ' ' + STR_DISPLAY_ERROR
_str = msg % {'type': _type, 'value': value, 'traceback': tb}
self.m_printer(_str)
if not _type in g_error_mapping:
print_exception(_type, value, tb, True)
def __report_server_errors(self, errors, fsupress_pwd_warning = False):
for k, el in errors.items():
if fsupress_pwd_warning and k in [BadVersion, AuthenticationBadData, AuthenticationFailure]:
continue
if k in [BadVersion]:
for (t, v, tb) in el:
self.report_exception(t, v, None)
continue
(t, v, tb) = el[0]
self.report_exception(t, v, tb)
def __attach(self, server, fsetenv):
self.__verify_unattached()
session = CSession(self.m_host, server.m_port, self.m_rpdb2_pwd, self.m_fAllowUnencrypted, self.m_rid)
session.Connect()
if (session.getServerInfo().m_pid != server.m_pid) or (session.getServerInfo().m_filename != server.m_filename):
raise UnexpectedData
self.m_session = session
self.m_server_info = self.get_server_info()
self.getSession().getProxy().set_synchronicity(self.m_fsynchronicity)
self.getSession().getProxy().set_trap_unhandled_exceptions(self.m_ftrap)
self.getSession().getProxy().set_fork_mode(self.m_ffork_into_child, self.m_ffork_auto)
if fsetenv and len(self.m_environment) != 0:
self.getSession().getProxy().set_environ(self.m_environment)
self.request_break()
self.refresh(True)
self.__start_event_monitor()
print_debug('Attached to debuggee on port %d.' % session.m_port)
#self.enable_breakpoint([], fAll = True)
def __verify_unattached(self):
if self.__is_attached():
raise AlreadyAttached
def __verify_attached(self):
if not self.__is_attached():
raise NotAttached
def __is_attached(self):
return (self.m_state_manager.get_state() != STATE_DETACHED) and (self.m_session is not None)
def __verify_broken(self):
if self.m_state_manager.get_state() not in [STATE_BROKEN, STATE_ANALYZE]:
raise DebuggerNotBroken
def refresh(self, fSendUnhandled = False):
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
self.m_remote_event_index = self.getSession().getProxy().sync_with_events(fAnalyzeMode, fSendUnhandled)
self.m_breakpoints_proxy.sync()
def __start_event_monitor(self):
self.m_fStop = False
self.m_worker_thread = threading.Thread(target = self.__event_monitor_proc)
#thread_set_daemon(self.m_worker_thread, True)
self.m_worker_thread.start()
def __event_monitor_proc(self):
self.m_worker_thread_ident = thread.get_ident()
t = 0
nfailures = 0
while not self.m_fStop:
try:
t = ControlRate(t, IDLE_MAX_RATE)
if self.m_fStop:
return
(n, sel) = self.getSession().getProxy().wait_for_event(PING_TIMEOUT, self.m_remote_event_index)
if True in [isinstance(e, CEventForkSwitch) for e in sel]:
print_debug('Received fork switch event.')
self.getSession().pause()
threading.Thread(target = self.restart_session_job).start()
if True in [isinstance(e, CEventExecSwitch) for e in sel]:
print_debug('Received exec switch event.')
self.getSession().pause()
threading.Thread(target = self.restart_session_job, args = (True, )).start()
if True in [isinstance(e, CEventExit) for e in sel]:
self.getSession().shut_down()
self.m_fStop = True
if n > self.m_remote_event_index:
#print >> sys.__stderr__, (n, sel)
self.m_remote_event_index = n
self.m_event_dispatcher_proxy.fire_events(sel)
nfailures = 0
except CConnectionException:
if not self.m_fStop:
self.report_exception(*sys.exc_info())
threading.Thread(target = self.detach_job).start()
return
except socket.error:
if nfailures < COMMUNICATION_RETRIES:
nfailures += 1
continue
if not self.m_fStop:
self.report_exception(*sys.exc_info())
threading.Thread(target = self.detach_job).start()
return
def on_event_conflicting_modules(self, event):
s = ', '.join(event.m_modules_list)
self.m_printer(STR_CONFLICTING_MODULES % s)
def on_event_signal_intercept(self, event):
if self.m_state_manager.get_state() in [STATE_ANALYZE, STATE_BROKEN]:
self.m_printer(STR_SIGNAL_INTERCEPT % (event.m_signame, event.m_signum))
def on_event_signal_exception(self, event):
self.m_printer(STR_SIGNAL_EXCEPTION % (event.m_description, event.m_signame, event.m_signum))
def on_event_embedded_sync(self, event):
#
# time.sleep() allows pending break requests to go through...
#
time.sleep(0.001)
self.getSession().getProxy().embedded_sync()
def on_event_exit(self, event):
self.m_printer(STR_DEBUGGEE_TERMINATED)
threading.Thread(target = self.detach_job).start()
def restart_session_job(self, fSendExitOnFailure = False):
try:
self.getSession().restart(sleep = 3)
return
except:
pass
self.m_fStop = True
if fSendExitOnFailure:
e = CEventExit()
self.m_event_dispatcher_proxy.fire_event(e)
return
self.m_printer(STR_LOST_CONNECTION)
self.detach_job()
def detach_job(self):
try:
self.detach()
except:
pass
def detach(self):
self.__verify_attached()
try:
self.save_breakpoints()
except:
print_debug_exception()
pass
self.m_printer(STR_ATTEMPTING_TO_DETACH)
self.m_state_manager.set_state(STATE_DETACHING)
self.__stop_event_monitor()
try:
#self.disable_breakpoint([], fAll = True)
try:
self.getSession().getProxy().set_trap_unhandled_exceptions(False)
self.request_go(fdetach = True)
except DebuggerNotBroken:
pass
finally:
self.m_state_manager.set_state(STATE_DETACHED)
self.m_session = None
self.m_printer(STR_DETACH_SUCCEEDED)
def __stop_event_monitor(self):
self.m_fStop = True
if self.m_worker_thread is not None:
if thread.get_ident() != self.m_worker_thread_ident:
try:
self.getSession().getProxy().null()
except:
pass
self.m_worker_thread.join()
self.m_worker_thread = None
self.m_worker_thread_ident = None
def request_break(self):
self.getSession().getProxy().request_break()
def request_go(self, fdetach = False):
self.getSession().getProxy().request_go(fdetach)
def request_go_breakpoint(self, filename, scope, lineno):
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
self.getSession().getProxy().request_go_breakpoint(filename, scope, lineno, frame_index, fAnalyzeMode)
def request_step(self):
self.getSession().getProxy().request_step()
def request_next(self):
self.getSession().getProxy().request_next()
def request_return(self):
self.getSession().getProxy().request_return()
def request_jump(self, lineno):
self.getSession().getProxy().request_jump(lineno)
def set_breakpoint(self, filename, scope, lineno, fEnabled, expr, encoding = None):
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
if encoding == None:
encoding = self.m_encoding
self.getSession().getProxy().set_breakpoint(filename, scope, lineno, fEnabled, expr, frame_index, fAnalyzeMode, encoding)
def disable_breakpoint(self, id_list, fAll):
self.getSession().getProxy().disable_breakpoint(id_list, fAll)
def enable_breakpoint(self, id_list, fAll):
self.getSession().getProxy().enable_breakpoint(id_list, fAll)
def delete_breakpoint(self, id_list, fAll):
self.getSession().getProxy().delete_breakpoint(id_list, fAll)
def get_breakpoints(self):
self.__verify_attached()
bpl = self.m_breakpoints_proxy.get_breakpoints()
return bpl
def save_breakpoints(self, filename = ''):
self.__verify_attached()
module_name = self.getSession().getServerInfo().m_module_name
if module_name[:1] == '<':
return
if sys.platform == 'OpenVMS':
#
# OpenVMS filesystem does not support byte stream.
#
mode = 'w'
else:
mode = 'wb'
path = calc_bpl_filename(module_name + filename)
file = open(path, mode)
try:
try:
bpl = self.get_breakpoints()
sbpl = pickle.dumps(bpl)
file.write(sbpl)
except:
print_debug_exception()
raise CException
finally:
file.close()
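#
# Note: breakpoints are persisted as a pickled breakpoint list under
# the path computed by calc_bpl_filename(module_name + filename), and
# restored by load_breakpoints() below.
#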
def load_breakpoints(self, filename = ''):
self.__verify_attached()
module_name = self.getSession().getServerInfo().m_module_name
if module_name[:1] == '<':
return
if sys.platform == 'OpenVMS':
#
# OpenVMS filesystem does not support byte stream.
#
mode = 'r'
else:
mode = 'rb'
path = calc_bpl_filename(module_name + filename)
file = open(path, mode)
ferror = False
try:
try:
bpl = pickle.load(file)
self.delete_breakpoint([], True)
except:
print_debug_exception()
raise CException
#
# No breakpoints were found in the file.
#
if filename == '' and len(bpl.values()) == 0:
raise IOError
for bp in bpl.values():
try:
if bp.m_scope_fqn != None:
bp.m_scope_fqn = as_unicode(bp.m_scope_fqn)
if bp.m_filename != None:
bp.m_filename = as_unicode(bp.m_filename)
if bp.m_expr != None:
bp.m_expr = as_unicode(bp.m_expr)
if bp.m_expr in [None, '']:
bp.m_encoding = as_unicode('utf-8')
self.set_breakpoint(bp.m_filename, bp.m_scope_fqn, bp.m_scope_offset, bp.m_fEnabled, bp.m_expr, bp.m_encoding)
except:
print_debug_exception()
ferror = True
if ferror:
raise CException
finally:
file.close()
def on_event_synchronicity(self, event):
ffire = self.m_fsynchronicity != event.m_fsynchronicity
self.m_fsynchronicity = event.m_fsynchronicity
if ffire:
event = CEventSynchronicity(event.m_fsynchronicity)
self.m_event_dispatcher.fire_event(event)
def set_synchronicity(self, fsynchronicity):
self.m_fsynchronicity = fsynchronicity
if self.__is_attached():
try:
self.getSession().getProxy().set_synchronicity(fsynchronicity)
except NotAttached:
pass
event = CEventSynchronicity(fsynchronicity)
self.m_event_dispatcher.fire_event(event)
def get_synchronicity(self):
return self.m_fsynchronicity
def on_event_trap(self, event):
ffire = self.m_ftrap != event.m_ftrap
self.m_ftrap = event.m_ftrap
if ffire:
event = CEventTrap(event.m_ftrap)
self.m_event_dispatcher.fire_event(event)
def set_trap_unhandled_exceptions(self, ftrap):
self.m_ftrap = ftrap
if self.__is_attached():
try:
self.getSession().getProxy().set_trap_unhandled_exceptions(self.m_ftrap)
except NotAttached:
pass
event = CEventTrap(ftrap)
self.m_event_dispatcher.fire_event(event)
def get_trap_unhandled_exceptions(self):
return self.m_ftrap
def is_unhandled_exception(self):
self.__verify_attached()
return self.getSession().getProxy().is_unhandled_exception()
def on_event_fork_mode(self, event):
ffire = ((self.m_ffork_into_child , self.m_ffork_auto) !=
(event.m_ffork_into_child, event.m_ffork_auto))
self.m_ffork_into_child = event.m_ffork_into_child
self.m_ffork_auto = event.m_ffork_auto
if ffire:
event = CEventForkMode(self.m_ffork_into_child, self.m_ffork_auto)
self.m_event_dispatcher.fire_event(event)
def set_fork_mode(self, ffork_into_child, ffork_auto):
self.m_ffork_into_child = ffork_into_child
self.m_ffork_auto = ffork_auto
if self.__is_attached():
try:
self.getSession().getProxy().set_fork_mode(
self.m_ffork_into_child,
self.m_ffork_auto
)
except NotAttached:
pass
event = CEventForkMode(ffork_into_child, ffork_auto)
self.m_event_dispatcher.fire_event(event)
def get_fork_mode(self):
return (self.m_ffork_into_child, self.m_ffork_auto)
def get_stack(self, tid_list, fAll):
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
r = self.getSession().getProxy().get_stack(tid_list, fAll, fAnalyzeMode)
return r
def get_source_file(self, filename, lineno, nlines):
assert(is_unicode(filename))
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
r = self.getSession().getProxy().get_source_file(filename, lineno, nlines, frame_index, fAnalyzeMode)
return r
def get_source_lines(self, nlines, fAll):
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
r = self.getSession().getProxy().get_source_lines(nlines, fAll, frame_index, fAnalyzeMode)
return r
def get_thread_list(self):
(current_thread_id, thread_list) = self.getSession().getProxy().get_thread_list()
return (current_thread_id, thread_list)
def set_thread(self, tid):
self.reset_frame_indexes(None)
self.getSession().getProxy().set_thread(tid)
def get_namespace(self, nl, filter_level, repr_limit):
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
r = self.getSession().getProxy().get_namespace(nl, filter_level, frame_index, fAnalyzeMode, repr_limit, self.m_encoding, self.m_fraw)
return r
def evaluate(self, expr, fclear_completions = True):
assert(is_unicode(expr))
self.__verify_attached()
self.__verify_broken()
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
(value, warning, error) = self.getSession().getProxy().evaluate(expr, frame_index, fAnalyzeMode, self.m_encoding, self.m_fraw)
if fclear_completions:
self.m_completions.clear()
return (value, warning, error)
def execute(self, suite):
assert(is_unicode(suite))
self.__verify_attached()
self.__verify_broken()
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
(warning, error) = self.getSession().getProxy().execute(suite, frame_index, fAnalyzeMode, self.m_encoding)
self.m_completions.clear()
return (warning, error)
def set_encoding(self, encoding, fraw):
if (self.m_encoding, self.m_fraw) == (encoding, fraw):
return
self.m_encoding = encoding
self.m_fraw = fraw
event = CEventEncoding(encoding, fraw)
self.m_event_dispatcher.fire_event(event)
if self.__is_attached():
self.refresh()
def get_encoding(self):
return (self.m_encoding, self.m_fraw)
def set_host(self, host):
self.__verify_unattached()
try:
if not is_unicode(host):
host = host.decode('ascii')
host.encode('ascii')
except:
raise BadArgument
host = as_string(host, 'ascii')
try:
socket.getaddrinfo(host, 0, 0, socket.SOCK_STREAM)
except socket.gaierror:
if host.lower() != LOCALHOST:
raise
#
# Work-around for gaierror: (-8, 'Servname not supported for ai_socktype')
#
return self.set_host(LOOPBACK)
self.m_host = host
self.m_server_list_object = CServerList(host)
def get_host(self):
return as_unicode(self.m_host)
def calc_server_list(self):
if self.m_rpdb2_pwd is None:
raise UnsetPassword
if g_fFirewallTest:
firewall_test = CFirewallTest(self.get_remote())
if not firewall_test.run():
raise FirewallBlock
else:
print_debug('Skipping firewall test.')
server_list = self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid)
errors = self.m_server_list_object.get_errors()
self.__report_server_errors(errors)
return (server_list, errors)
def get_server_info(self):
return self.getSession().getServerInfo()
def complete_expression(self, expr):
match = re.search(
r'(?P<unsupported> \.)? (?P<match> ((?P<scope> (\w+\.)* \w+) \.)? (?P<complete>\w*) $)',
expr,
re.U | re.X
)
if match == None:
raise BadArgument
d = match.groupdict()
unsupported, scope, complete = (d['unsupported'], d['scope'], d['complete'])
if unsupported != None:
raise BadArgument
if scope == None:
_scope = as_unicode('list(globals().keys()) + list(locals().keys()) + list(_RPDB2_builtins.keys())')
else:
_scope = as_unicode('dir(%s)' % scope)
if not _scope in self.m_completions:
(v, w, e) = self.evaluate(_scope, fclear_completions = False)
if w != '' or e != '':
print_debug('evaluate() returned the following warning/error: %s' % w + e)
return (expr, [])
cl = list(set(eval(v)))
if '_RPDB2_builtins' in cl:
cl.remove('_RPDB2_builtins')
self.m_completions[_scope] = cl
completions = [attr for attr in self.m_completions[_scope] if attr.startswith(complete)]
completions.sort()
if complete == '':
prefix = expr
else:
prefix = expr[:-len(complete)]
return (prefix, completions)
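#
# Illustrative example (hypothetical input): completing 'os.pa' above
# evaluates 'dir(os)' in the debuggee, caches the resulting attribute
# list in m_completions under that scope key, and returns the prefix
# 'os.' together with the attributes that start with 'pa'.
#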
def _reset_frame_indexes(self, event):
self.reset_frame_indexes(None)
def reset_frame_indexes(self, event):
try:
self.m_state_manager.acquire()
if event is None:
self.__verify_broken()
elif self.m_state_manager.get_state() in [STATE_BROKEN, STATE_ANALYZE]:
return
self.m_stack_depth = None
self.m_stack_depth_exception = None
self.m_frame_index = 0
self.m_frame_index_exception = 0
self.m_completions.clear()
finally:
self.m_state_manager.release()
def set_stack_depth(self, event):
try:
self.m_state_manager.acquire()
self.__verify_broken()
self.m_stack_depth = event.m_stack_depth
self.m_stack_depth_exception = event.m_stack_depth_exception
self.m_frame_index = min(self.m_frame_index, self.m_stack_depth - 1)
self.m_frame_index_exception = min(self.m_frame_index_exception, self.m_stack_depth_exception - 1)
finally:
self.m_state_manager.release()
def set_frame_index(self, frame_index):
try:
self.m_state_manager.acquire()
self.__verify_broken()
if (frame_index < 0) or (self.m_stack_depth is None):
return self.get_frame_index(fLock = False)
if self.m_state_manager.get_state() == STATE_ANALYZE:
self.m_frame_index_exception = min(frame_index, self.m_stack_depth_exception - 1)
si = self.m_frame_index_exception
else:
self.m_frame_index = min(frame_index, self.m_stack_depth - 1)
si = self.m_frame_index
finally:
self.m_state_manager.release()
event = CEventStackFrameChange(si)
self.m_event_dispatcher.fire_event(event)
event = CEventNamespace()
self.m_event_dispatcher.fire_event(event)
return si
def get_frame_index(self, fLock = True):
try:
if fLock:
self.m_state_manager.acquire()
self.__verify_attached()
if self.m_state_manager.get_state() == STATE_ANALYZE:
return self.m_frame_index_exception
else:
return self.m_frame_index
finally:
if fLock:
self.m_state_manager.release()
def set_analyze(self, fAnalyze):
try:
self.m_state_manager.acquire()
if fAnalyze and (self.m_state_manager.get_state() != STATE_BROKEN):
raise DebuggerNotBroken
if (not fAnalyze) and (self.m_state_manager.get_state() != STATE_ANALYZE):
return
state = [STATE_BROKEN, STATE_ANALYZE][fAnalyze]
self.m_state_manager.set_state(state, fLock = False)
finally:
self.m_state_manager.release()
self.refresh()
def getSession(self):
self.__verify_attached()
return self.m_session
def get_state(self):
return as_unicode(self.m_state_manager.get_state())
def set_password(self, _rpdb2_pwd):
assert(is_unicode(_rpdb2_pwd))
if not is_valid_pwd(_rpdb2_pwd):
raise BadArgument
try:
self.m_state_manager.acquire()
self.__verify_unattached()
self.m_rpdb2_pwd = _rpdb2_pwd
finally:
self.m_state_manager.release()
def set_random_password(self):
try:
self.m_state_manager.acquire()
self.__verify_unattached()
self.m_rpdb2_pwd = generate_random_password()
self.m_printer(STR_RANDOM_PASSWORD)
finally:
self.m_state_manager.release()
def get_password(self):
return self.m_rpdb2_pwd
def set_remote(self, fAllowRemote):
try:
self.m_state_manager.acquire()
self.__verify_unattached()
self.m_fAllowRemote = fAllowRemote
finally:
self.m_state_manager.release()
def get_remote(self):
return self.m_fAllowRemote
def set_environ(self, envmap):
self.m_environment = []
try:
for k, v in envmap:
k = as_unicode(k, fstrict = True)
v = as_unicode(v, fstrict = True)
self.m_environment.append((k, v))
except:
raise BadArgument
def get_environ(self):
return self.m_environment
def stop_debuggee(self):
self.__verify_attached()
try:
self.save_breakpoints()
except:
print_debug_exception()
pass
self.m_printer(STR_ATTEMPTING_TO_STOP)
self.m_printer(STR_ATTEMPTING_TO_DETACH)
self.m_state_manager.set_state(STATE_DETACHING)
self.__stop_event_monitor()
try:
self.getSession().getProxy().stop_debuggee()
finally:
self.m_state_manager.set_state(STATE_DETACHED)
self.m_session = None
self.m_printer(STR_DETACH_SUCCEEDED)
class CConsoleInternal(cmd.Cmd, threading.Thread):
def __init__(self, session_manager, stdin = None, stdout = None, fSplit = False):
global g_fDefaultStd
cmd.Cmd.__init__(self, stdin = stdin, stdout = stdout)
threading.Thread.__init__(self)
self.fAnalyzeMode = False
self.fPrintBroken = True
self.m_filename = as_unicode('')
self.m_completion_thread = None
self.use_rawinput = [1, 0][fSplit]
self.m_fSplit = fSplit
self.prompt = [[CONSOLE_PROMPT, CONSOLE_PROMPT_ANALYZE][self.fAnalyzeMode], ""][fSplit]
self.intro = CONSOLE_INTRO
if fSplit:
self.intro += '\n'
#thread_set_daemon(self, True)
self.m_session_manager = session_manager
self.m_session_manager.set_printer(self.printer)
event_type_dict = {CEventState: {}}
self.m_session_manager.register_callback(self.event_handler, event_type_dict, fSingleUse = False)
event_type_dict = {CEventSynchronicity: {}}
self.m_session_manager.register_callback(self.synchronicity_handler, event_type_dict, fSingleUse = False)
event_type_dict = {CEventTrap: {}}
self.m_session_manager.register_callback(self.trap_handler, event_type_dict, fSingleUse = False)
event_type_dict = {CEventForkMode: {}}
self.m_session_manager.register_callback(self.fork_mode_handler, event_type_dict, fSingleUse = False)
self.m_last_source_line = None
self.m_last_nlines = DEFAULT_NUMBER_OF_LINES
self.m_fAddPromptBeforeMsg = False
self.m_eInLoop = threading.Event()
self.cmdqueue.insert(0, '')
self.m_stdout = self.stdout
self.m_encoding = detect_encoding(self.stdin)
g_fDefaultStd = (stdin == None)
if self.use_rawinput:
try:
import readline
cd = readline.get_completer_delims()
if not '.' in cd:
readline.set_completer_delims(cd + '.')
except:
pass
def set_filename(self, filename):
assert(is_unicode(filename))
self.m_filename = filename
def precmd(self, line):
line = as_unicode(line, self.m_encoding)
self.m_fAddPromptBeforeMsg = True
if not event_is_set(self.m_eInLoop):
self.m_eInLoop.set()
time.sleep(0.01)
if not line.strip():
return line
command = line.split(' ', 1)[0].split(SOURCE_MORE, 1)[0].split(SOURCE_LESS, 1)[0]
if command not in ['list', 'l']:
self.m_last_source_line = None
self.m_last_nlines = DEFAULT_NUMBER_OF_LINES
return line
def postcmd(self, stop, line):
self.m_fAddPromptBeforeMsg = False
return stop
def onecmd(self, line):
"""
Default error handling and reporting of session-manager errors.
"""
try:
return cmd.Cmd.onecmd(self, line)
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
return False
def default(self, line):
"""
Called on an input line when the command prefix is not recognized.
Overrides the base method in cmd.py.
"""
self.printer(STR_BAD_SYNTAX % line)
def emptyline(self):
pass
def complete(self, text, state):
"""
Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
if self.use_rawinput:
#
# Import cmd to work around a strange bug in Python.
#
import cmd
return cmd.Cmd.complete(self, text, state)
#
# Without rawinput, assume text includes the entire buffer up to the cursor.
#
try:
if state != 0:
return self.completion_matches[state]
if not ' ' in text:
self.completion_matches = self.completenames(text)
return self.completion_matches[state]
cmd, args, foo = self.parseline(text)
if cmd == '' or not hasattr(self, 'complete_' + cmd):
self.completion_matches = self.completedefault(text)
return self.completion_matches[state]
compfunc = getattr(self, 'complete_' + cmd)
self.completion_matches = compfunc(text)
return self.completion_matches[state]
except IndexError:
return None
def complete_launch(self, text, line = None, begidx = None, endidx = None):
if line != None and endidx != None:
text = line[:endidx]
if text.endswith(' '):
dn, bn = '', ''
else:
path = text.split()[-1]
dn, bn = os.path.split(path)
prefix = text
if bn != '':
prefix = prefix[:-len(bn)]
if dn == '' and bn.startswith('~'):
if bn == os.path.expanduser(bn):
c = text
else:
c = os.path.join(text, '')
if begidx != None:
c = c[begidx:]
return [c]
pl = [dn]
if dn == '':
pl += os.environ['PATH'].split(os.pathsep)
fl = []
for p in pl:
if p == '':
p = '.'
try:
ep = os.path.expanduser(p)
l = os.listdir(ep)
for f in l:
if not f.startswith(bn):
continue
root, ext = os.path.splitext(f)
if not ext in ['.py', '.pyw', '']:
continue
if os.path.isdir(os.path.join(ep, f)):
c = prefix + os.path.join(f, '')
else:
c = prefix + f
if begidx != None:
c = c[begidx:]
fl.append(c)
except:
pass
fs = set(fl)
cl = list(fs)
cl.sort()
return cl
def complete_eval(self, text, line = None, begidx = None, endidx = None):
t = self.m_completion_thread
if t != None and thread_is_alive(t):
return []
self.m_completion_thread = None
result = [('', [])]
if line != None and endidx != None:
text = line[:endidx]
t = threading.Thread(target = self.complete_expression_job, args = (text, result))
t.start()
t.join(PING_TIMEOUT)
if thread_is_alive(t):
self.m_completion_thread = t
return []
(prefix, completions) = result[-1]
if begidx != None:
prefix = prefix[begidx:]
ce = [prefix + c for c in completions]
return ce
complete_v = complete_eval
complete_exec = complete_eval
complete_x = complete_exec
def complete_expression_job(self, text, result):
try:
(prefix, completions) = self.m_session_manager.complete_expression(text)
result.append((prefix, completions))
except:
print_debug_exception()
def run(self):
self.cmdloop()
def __get_str_wrap(self, _str, max_len):
if len(_str) <= max_len and not '\n' in _str:
return (_str, '')
s = _str[: max_len]
i = s.find('\n')
if i == -1:
i = s.rfind(' ')
if i == -1:
return (s, _str[max_len:])
return (_str[: i], _str[i + 1:])
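#
# For example, __get_str_wrap('abc def', 5) splits at the last space
# that fits within the limit and returns ('abc', 'def'); text with no
# newline or space to break at is simply cut at max_len.
#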
def printer(self, _str):
if not event_is_set(self.m_eInLoop):
self.m_eInLoop.wait()
fAPBM = self.m_fAddPromptBeforeMsg
prefix = ['', self.prompt.strip('\n')][fAPBM] + CONSOLE_PRINTER
suffix = '\n' + [self.prompt.strip('\n'), ''][fAPBM]
s = _str
while s != '':
s, _s = self.__get_str_wrap(s, CONSOLE_WRAP_INDEX - len(prefix + suffix))
_print(prefix + s + suffix, self.m_stdout, feol = False)
s = _s
self.m_stdout.flush()
def print_notice(self, notice):
nl = notice.split('\n')
i = 0
for l in nl:
_print(l, self.m_stdout)
i += 1
if i % PRINT_NOTICE_LINES_PER_SECTION == 0:
_print("\n" + PRINT_NOTICE_PROMPT, self.m_stdout, feol = False)
response = self.stdin.readline()
if response != '\n':
break
_print('', self.m_stdout)
def event_handler(self, event):
state = event.m_state
if (state == STATE_BROKEN) and self.fPrintBroken:
self.fPrintBroken = False
self.printer(STR_DEBUGGER_HAS_BROKEN)
return
if (state != STATE_ANALYZE) and self.fAnalyzeMode:
self.fAnalyzeMode = False
self.prompt = [CONSOLE_PROMPT, ""][self.m_fSplit]
self.printer(STR_ANALYZE_MODE_TOGGLE % MODE_OFF)
return
if (state == STATE_ANALYZE) and not self.fAnalyzeMode:
self.fAnalyzeMode = True
self.prompt = [CONSOLE_PROMPT_ANALYZE, ""][self.m_fSplit]
self.printer(STR_ANALYZE_MODE_TOGGLE % MODE_ON)
return
def synchronicity_handler(self, event):
self.printer(STR_SYNCHRONICITY_MODE % str(event.m_fsynchronicity))
def trap_handler(self, event):
self.printer(STR_TRAP_MODE_SET % str(event.m_ftrap))
def fork_mode_handler(self, event):
x = [FORK_PARENT, FORK_CHILD][event.m_ffork_into_child]
y = [FORK_MANUAL, FORK_AUTO][event.m_ffork_auto]
self.printer(STR_FORK_MODE_SET % (x, y))
def do_launch(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
if arg[:2] == '-k':
fchdir = False
_arg = arg[2:].strip()
else:
fchdir = True
_arg = arg
self.fPrintBroken = True
try:
self.m_session_manager.launch(fchdir, _arg)
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % arg)
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
def do_restart(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
self.m_session_manager.restart()
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % arg)
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
def do_attach(self, arg):
if arg == '':
return self.__scripts(arg)
self.fPrintBroken = True
try:
self.m_session_manager.attach(arg)
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
def __scripts(self, arg):
if self.m_session_manager.get_password() is None:
_print(STR_PASSWORD_MUST_BE_SET, self.m_stdout)
return
host = self.m_session_manager.get_host()
_print(STR_SCRIPTS_CONNECTING % host, self.m_stdout)
(server_list, errors) = self.m_session_manager.calc_server_list()
if server_list == []:
_print(STR_SCRIPTS_NO_SCRIPTS % host, self.m_stdout)
return
try:
spid = self.m_session_manager.get_server_info().m_pid
except NotAttached:
spid = None
_print(STR_SCRIPTS_TO_DEBUG % host, self.m_stdout)
for s in server_list:
m = ['', SYMBOL_MARKER][spid == s.m_pid]
_print(' %1s %-5d %s' % (m, s.m_pid, s.m_filename), self.m_stdout)
def do_detach(self, arg):
if not arg == '':
self.printer(STR_BAD_ARGUMENT)
return
self.m_session_manager.detach()
def do_host(self, arg):
if arg == '':
host = self.m_session_manager.get_host()
_print(host, self.m_stdout)
return
try:
self.m_session_manager.set_host(arg)
except socket.gaierror:
e = sys.exc_info()[1]
self.printer(MSG_ERROR_HOST_TEXT % (arg, e))
def do_break(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
self.m_session_manager.request_break()
do_b = do_break
def __parse_bp_arg(self, arg, fAllowExpr = True):
_args = arg.split(BP_EVAL_SEP)
if (len(_args) > 1) and (not fAllowExpr):
raise BadArgument
if len(_args) > 1:
expr = _args[1].strip()
else:
expr = ''
rf = _args[0].rfind(BP_FILENAME_SEP)
if rf == -1:
args = [_args[0]]
else:
args = [_args[0][:rf], _args[0][rf + 1:]]
filename = ['', args[0]][len(args) > 1]
if filename in [None, '']:
filename = self.m_filename
try:
lineno = int(args[-1])
scope = ''
except ValueError:
lineno = 0
scope = args[-1].strip()
return (filename, scope, lineno, expr)
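#
# Examples of parsed breakpoint arguments (assuming the default ':'
# filename separator and ',' expression separator):
#   'test_file.py:20'    -> ('test_file.py', '', 20, '')
#   'MyClass.Foo, x > 3' -> (self.m_filename, 'MyClass.Foo', 0, 'x > 3')
#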
def do_go(self, arg):
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
if arg != '':
(filename, scope, lineno, expr) = self.__parse_bp_arg(arg, fAllowExpr = False)
self.fPrintBroken = True
self.m_session_manager.request_go_breakpoint(filename, scope, lineno)
return
self.fPrintBroken = True
self.m_session_manager.request_go()
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % filename)
except InvalidScopeName:
self.printer(STR_SCOPE_NOT_FOUND % scope)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
do_g = do_go
def do_step(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
self.m_session_manager.request_step()
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_s = do_step
def do_next(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
self.m_session_manager.request_next()
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_n = do_next
def do_return(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
self.m_session_manager.request_return()
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_r = do_return
def do_jump(self, arg):
try:
lineno = int(arg)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
return
try:
self.m_session_manager.request_jump(lineno)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_j = do_jump
def do_bp(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
(filename, scope, lineno, expr) = self.__parse_bp_arg(arg, fAllowExpr = True)
self.m_session_manager.set_breakpoint(filename, scope, lineno, True, expr)
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % filename)
except InvalidScopeName:
self.printer(STR_SCOPE_NOT_FOUND % scope)
except SyntaxError:
self.printer(STR_BAD_EXPRESSION % expr)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
def do_be(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
id_list = []
fAll = (arg == SYMBOL_ALL)
if not fAll:
sid_list = arg.split()
id_list = [int(sid) for sid in sid_list]
self.m_session_manager.enable_breakpoint(id_list, fAll)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
def do_bd(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
id_list = []
fAll = (arg == SYMBOL_ALL)
if not fAll:
sid_list = arg.split()
id_list = [int(sid) for sid in sid_list]
self.m_session_manager.disable_breakpoint(id_list, fAll)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
def do_bc(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
id_list = []
fAll = (arg == SYMBOL_ALL)
if not fAll:
sid_list = arg.split()
id_list = [int(sid) for sid in sid_list]
self.m_session_manager.delete_breakpoint(id_list, fAll)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
def do_bl(self, arg):
bpl = self.m_session_manager.get_breakpoints()
bplk = list(bpl.keys())
bplk.sort()
_print(STR_BREAKPOINTS_LIST, self.m_stdout)
for id in bplk:
bp = bpl[id]
if bp.m_expr:
expr = bp.m_expr
else:
expr = ''
try:
expr.encode('ascii', 'strict')
encoding = ''
except:
encoding = bp.m_encoding
scope = bp.m_scope_fqn
if scope.startswith(MODULE_SCOPE + '.'):
scope = scope[len(MODULE_SCOPE) + 1:]
elif scope.startswith(MODULE_SCOPE2 + '.'):
scope = scope[len(MODULE_SCOPE2) + 1:]
state = [STATE_DISABLED, STATE_ENABLED][bp.isEnabled()]
s = STR_BREAKPOINTS_TEMPLATE % (id, state, bp.m_lineno, clip_filename(bp.m_filename, 45), calc_suffix(scope, 45), calc_prefix(expr, 50), encoding)
_print(s.rstrip() + '\n', self.m_stdout)
def do_save(self, arg):
self.m_session_manager.save_breakpoints(arg)
_print(STR_BREAKPOINTS_SAVED, self.m_stdout)
return
def do_load(self, arg):
try:
self.m_session_manager.load_breakpoints(arg)
_print(STR_BREAKPOINTS_LOADED, self.m_stdout)
return
except IOError:
error = [STR_BREAKPOINTS_FILE_NOT_FOUND, STR_BREAKPOINTS_NOT_FOUND][arg == '']
self.printer(error)
def do_stack(self, arg):
if self.fAnalyzeMode and (arg != ''):
self.printer(STR_ILEGAL_ANALYZE_MODE_ARG)
return
try:
tid_list = []
fAll = (arg == SYMBOL_ALL)
if not fAll:
sid_list = arg.split()
tid_list = [int(sid) for sid in sid_list]
sl = self.m_session_manager.get_stack(tid_list, fAll)
if len(sl) == 0:
self.printer(STR_NO_THREADS_FOUND)
return
frame_index = self.m_session_manager.get_frame_index()
m = None
for st in sl:
s = st.get(DICT_KEY_STACK, [])
tid = st.get(DICT_KEY_TID, 0)
fBroken = st.get(DICT_KEY_BROKEN, False)
fCurrent = st.get(DICT_KEY_CURRENT_TID, False)
if m is not None:
_print('', self.m_stdout)
_print(STR_STACK_TRACE % tid, self.m_stdout)
i = 0
while i < len(s):
e = s[-(1 + i)]
marker = [SOURCE_STATE_UNBROKEN, SYMBOL_MARKER][fBroken]
if fCurrent:
m = ['', marker][i == frame_index]
else:
m = ['', marker][i == 0]
_print(' %1s %5d %-28s %4d %s' % (m, i, calc_suffix(e[0], 28), e[1], calc_prefix(e[2], 20)), self.m_stdout)
i += 1
except ValueError:
self.printer(STR_BAD_ARGUMENT)
except (NoExceptionFound, NoThreads):
self.m_session_manager.report_exception(*sys.exc_info())
do_k = do_stack
def do_list(self, arg):
rf = arg.rfind(BP_FILENAME_SEP)
if rf == -1:
_filename = ''
__args2 = arg
else:
_filename = arg[:rf]
__args2 = arg[rf + 1:]
_args = __args2.split(BP_EVAL_SEP)
fAll = (_args[0] == SYMBOL_ALL)
fMore = (_args[0] == SOURCE_MORE)
fLess = (_args[0] == SOURCE_LESS)
fEntire = (_args[0] == SOURCE_ENTIRE_FILE)
fCurrent = (_args[0] == '')
fLine = False
l = 1
try:
if len(_args) > 1:
nlines = int(_args[1])
else:
nlines = self.m_last_nlines
if not (fAll or fMore or fLess or fEntire or fCurrent):
l = int(_args[0])
fLine = True
except ValueError:
self.printer(STR_BAD_ARGUMENT)
return
if self.fAnalyzeMode and fAll:
self.printer(STR_ILEGAL_ANALYZE_MODE_ARG)
return
if fMore and self.m_last_source_line:
l = max(1, self.m_last_source_line + self.m_last_nlines // 2 + 1)
fLine = True
elif fLess and self.m_last_source_line:
l = max(1, self.m_last_source_line - (self.m_last_nlines - 1) // 2 - nlines)
fLine = True
try:
if fEntire:
r = [self.m_session_manager.get_source_file(_filename, -1, -1)]
elif fLine:
r = [self.m_session_manager.get_source_file(_filename, l, nlines)]
elif _filename != '':
r = [self.m_session_manager.get_source_file(_filename, l, nlines)]
else:
r = self.m_session_manager.get_source_lines(nlines, fAll)
if len(r) == 0:
self.printer(STR_NO_THREADS_FOUND)
return
m = None
for d in r:
tid = d.get(DICT_KEY_TID, 0)
filename = d.get(DICT_KEY_FILENAME, '')
breakpoints = d.get(DICT_KEY_BREAKPOINTS, {})
source_lines = d.get(DICT_KEY_LINES, [])
first_lineno = d.get(DICT_KEY_FIRST_LINENO, 0)
if len(r) == 1 and first_lineno != 0:
l = first_lineno
fBroken = d.get(DICT_KEY_BROKEN, False)
frame_event = d.get(DICT_KEY_EVENT, '')
frame_lineno = d.get(DICT_KEY_FRAME_LINENO, 0)
if m is not None:
_print('', self.m_stdout)
_print(STR_SOURCE_LINES % (tid, filename), self.m_stdout)
for i, line in enumerate(source_lines):
lineno = first_lineno + i
if lineno != frame_lineno:
m = ''
elif not fBroken:
m = SOURCE_STATE_UNBROKEN + SYMBOL_MARKER
elif frame_event == 'call':
m = SOURCE_EVENT_CALL + SYMBOL_MARKER
elif frame_event == 'line':
m = SOURCE_EVENT_LINE + SYMBOL_MARKER
elif frame_event == 'return':
m = SOURCE_EVENT_RETURN + SYMBOL_MARKER
elif frame_event == 'exception':
m = SOURCE_EVENT_EXCEPTION + SYMBOL_MARKER
if breakpoints.get(lineno, None) == STATE_ENABLED:
b = SOURCE_BP_ENABLED
elif breakpoints.get(lineno, None) == STATE_DISABLED:
b = SOURCE_BP_DISABLED
else:
b = ''
line = line.replace('\t', ' ' * PYTHON_TAB_WIDTH)
# [Cyclopath] Increase list cmd. width, since our code is
# 80 chars wide.
#_print(' %2s %1s %5d %s' % (m, b, lineno, calc_prefix(line[:-1], 60)), self.m_stdout)
_print(' %2s %1s %5d %s' % (m, b, lineno, calc_prefix(line[:-1], 80)), self.m_stdout)
if fAll or fEntire:
self.m_last_source_line = None
elif len(source_lines) != 0:
self.m_last_source_line = [l + (nlines - 1) // 2, frame_lineno][l == -1]
self.m_last_nlines = nlines
except (InvalidFrame, IOError):
self.printer(STR_SOURCE_NOT_FOUND)
except (NoExceptionFound, NoThreads):
self.m_session_manager.report_exception(*sys.exc_info())
do_l = do_list
def do_up(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
fi = self.m_session_manager.get_frame_index()
self.m_session_manager.set_frame_index(fi - 1)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
def do_down(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
fi = self.m_session_manager.get_frame_index()
self.m_session_manager.set_frame_index(fi + 1)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
def evaluate_job(self, sync_event, expr):
try:
(value, warning, error) = self.m_session_manager.evaluate(expr)
if warning:
self.printer(STR_WARNING % warning)
if error:
_print(error + '\n', self.m_stdout)
_print(value, self.m_stdout)
if event_is_set(sync_event):
_print(self.prompt, self.m_stdout, feol = False)
return
except (NoExceptionFound, DebuggerNotBroken):
self.m_session_manager.report_exception(*sys.exc_info())
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
def do_eval(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
sync_event = threading.Event()
t = threading.Thread(target = self.evaluate_job, args = (sync_event, arg))
t.start()
t.join(WAIT_FOR_BREAK_TIMEOUT)
if thread_is_alive(t):
_print(STR_OUTPUT_WARNING_ASYNC, self.m_stdout)
sync_event.set()
do_v = do_eval
def execute_job(self, sync_event, suite):
try:
(warning, error) = self.m_session_manager.execute(suite)
if warning:
self.printer(STR_WARNING % warning)
if error:
_print(error + '\n', self.m_stdout)
if event_is_set(sync_event):
_print(self.prompt, self.m_stdout, feol = False)
return
except (NoExceptionFound, DebuggerNotBroken):
self.m_session_manager.report_exception(*sys.exc_info())
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
def do_exec(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
_print(STR_OUTPUT_WARNING, self.m_stdout)
sync_event = threading.Event()
t = threading.Thread(target = self.execute_job, args = (sync_event, arg))
t.start()
t.join(WAIT_FOR_BREAK_TIMEOUT)
if thread_is_alive(t):
_print(STR_OUTPUT_WARNING_ASYNC, self.m_stdout)
sync_event.set()
do_x = do_exec
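#
# Both do_eval() and do_exec() run their session-manager call on a
# worker thread. If the call does not finish within
# WAIT_FOR_BREAK_TIMEOUT, the console warns that the output will arrive
# asynchronously and sets sync_event so the job re-prints the prompt
# when it eventually completes.
#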
def do_encoding(self, arg):
if arg == '':
encoding, fraw = self.m_session_manager.get_encoding()
if encoding != ENCODING_AUTO:
try:
codecs.lookup(encoding)
except:
encoding += ' (?)'
if fraw:
encoding += ', ' + ENCODING_RAW
_print(STR_ENCODING_MODE % encoding, self.m_stdout)
return
if ',' in arg:
encoding, raw = arg.split(',')
else:
encoding, raw = arg, ''
encoding = encoding.strip()
if encoding == '':
encoding, fraw = self.m_session_manager.get_encoding()
fraw = 'raw' in raw
self.m_session_manager.set_encoding(encoding, fraw)
if encoding != ENCODING_AUTO:
try:
codecs.lookup(encoding)
except:
encoding += ' (?)'
_print(STR_ENCODING_BAD, self.m_stdout)
if fraw:
encoding += ', ' + ENCODING_RAW
_print(STR_ENCODING_MODE_SET % encoding, self.m_stdout)
def do_thread(self, arg):
if self.fAnalyzeMode and (arg != ''):
self.printer(STR_ILEGAL_ANALYZE_MODE_ARG)
return
try:
if arg != '':
tid = int(arg)
self.m_session_manager.set_thread(tid)
_print(STR_THREAD_FOCUS_SET, self.m_stdout)
return
(current_thread_id, tl) = self.m_session_manager.get_thread_list()
_print(STR_ACTIVE_THREADS, self.m_stdout)
for i, t in enumerate(tl):
m = ['', SYMBOL_MARKER][t[DICT_KEY_TID] == current_thread_id]
state = [STATE_RUNNING, STR_STATE_BROKEN][t[DICT_KEY_BROKEN]]
_print(' %1s %3d %5d %-15s %s' % (m, i, t[DICT_KEY_TID], t[DICT_KEY_NAME], state[:25]), self.m_stdout)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
except ThreadNotFound:
self.printer(STR_THREAD_NOT_FOUND)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_t = do_thread
def do_analyze(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
self.m_session_manager.set_analyze(not self.fAnalyzeMode)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_a = do_analyze
def do_synchro(self, arg):
if arg == '':
fsynchronicity = self.m_session_manager.get_synchronicity()
_print(STR_SYNCHRONICITY_MODE % str(fsynchronicity), self.m_stdout)
return
if arg == str(True):
fsynchronicity = True
elif arg == str(False):
fsynchronicity = False
else:
_print(STR_BAD_ARGUMENT, self.m_stdout)
return
self.m_session_manager.set_synchronicity(fsynchronicity)
def do_trap(self, arg):
if arg == '':
ftrap = self.m_session_manager.get_trap_unhandled_exceptions()
_print(STR_TRAP_MODE % str(ftrap), self.m_stdout)
return
if arg == str(True):
ftrap = True
elif arg == str(False):
ftrap = False
else:
_print(STR_BAD_ARGUMENT, self.m_stdout)
return
self.m_session_manager.set_trap_unhandled_exceptions(ftrap)
def do_fork(self, arg):
(ffork_into_child, ffork_auto) = self.m_session_manager.get_fork_mode()
if arg == '':
x = [FORK_PARENT, FORK_CHILD][ffork_into_child]
y = [FORK_MANUAL, FORK_AUTO][ffork_auto]
_print(STR_FORK_MODE % (x, y), self.m_stdout)
return
arg = arg.lower()
if FORK_PARENT in arg:
ffork_into_child = False
elif FORK_CHILD in arg:
ffork_into_child = True
if FORK_AUTO in arg:
ffork_auto = True
elif FORK_MANUAL in arg:
ffork_auto = False
self.m_session_manager.set_fork_mode(ffork_into_child, ffork_auto)
def do_password(self, arg):
if arg == '':
_rpdb2_pwd = self.m_session_manager.get_password()
if _rpdb2_pwd is None:
_print(STR_PASSWORD_NOT_SET, self.m_stdout)
else:
_print(STR_PASSWORD_SET % _rpdb2_pwd, self.m_stdout)
return
_rpdb2_pwd = arg.strip('"\'')
try:
self.m_session_manager.set_password(_rpdb2_pwd)
_print(STR_PASSWORD_SET % _rpdb2_pwd, self.m_stdout)
except BadArgument:
_print(STR_PASSWORD_BAD, self.m_stdout)
def do_remote(self, arg):
if arg == '':
fAllowRemote = self.m_session_manager.get_remote()
_print(STR_REMOTE_MODE % str(fAllowRemote), self.m_stdout)
return
if arg == str(True):
fAllowRemote = True
elif arg == str(False):
fAllowRemote = False
else:
_print(STR_BAD_ARGUMENT, self.m_stdout)
return
self.m_session_manager.set_remote(fAllowRemote)
_print(STR_REMOTE_MODE % str(fAllowRemote), self.m_stdout)
def do_env(self, arg):
env = self.m_session_manager.get_environ()
if arg == '':
if len(env) == 0:
_print(STR_ENVIRONMENT_EMPTY, self.m_stdout)
return
_print(STR_ENVIRONMENT, self.m_stdout)
for k, v in env:
_print('%s=%s' % (k, v), self.m_stdout)
return
if arg[:2] == '-d':
k = arg[2:].strip()
_env = [(_k, _v) for (_k, _v) in env if _k != k]
self.m_session_manager.set_environ(_env)
return
try:
k, v = arg.split('=')
k = k.strip()
v = v.strip()
except ValueError:
self.printer(STR_BAD_ARGUMENT)
return
_env = [(_k, _v) for (_k, _v) in env if _k != k]
_env.append((k, v))
self.m_session_manager.set_environ(_env)
def do_stop(self, arg):
self.m_session_manager.stop_debuggee()
def do_exit(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.m_session_manager.get_state() != STATE_DETACHED:
try:
self.do_stop('')
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
_print('', self.m_stdout)
return True
do_EOF = do_exit
def do_copyright(self, arg):
self.print_notice(COPYRIGHT_NOTICE)
def do_license(self, arg):
self.print_notice(LICENSE_NOTICE + COPY_OF_THE_GPL_LICENSE)
def do_credits(self, arg):
self.print_notice(CREDITS_NOTICE)
def do_help(self, arg):
cmd.Cmd.do_help(self, arg)
if arg == '':
help_notice = """Security:
----------------
password - Get or set the channel password.
remote - Get or set "allow connections from remote machines" mode.
Session Control:
-----------------
env - Display or set the environment setting for new sessions.
host - Display or change host.
attach - Display scripts or attach to a script on host.
detach - Detach from script.
launch - Start a script and attach to it.
restart - Restart a script.
stop - Shutdown the debugged script.
exit - Exit from debugger.
Debuggee Control:
-----------------
break - Request an immediate break.
step - Continue to the next execution line.
next - Continue to the next execution line in the current frame.
return - Continue until the debugger is about to return from the frame.
jump - Jump to a line in the current scope.
go - Continue execution.
Breakpoints Control:
--------------------
bp - Set a break point.
bd - Disable a breakpoint.
be - Enable a breakpoint.
bc - Clear (delete) a breakpoint.
bl - List all breakpoints.
load - Load session breakpoints.
save - Save session breakpoints.
Misc:
-----
thread - Display threads or switch to a particular thread.
list - List source code.
stack - Display stack trace.
up - Go up one frame in stack.
down - Go down one frame in stack.
encoding - Set the source encoding used by exec and eval commands.
eval - Evaluate expression in the context of the current frame.
exec - Execute suite in the context of the current frame.
analyze - Toggle analyze last exception mode.
trap - Get or set "trap unhandled exceptions" mode.
fork - Get or set fork handling mode.
synchro - Get or set synchronicity mode.
License:
----------------
copyright - Print copyright notice.
license - Print license.
credits - Print credits information.
Type 'help <topic>' for further information."""
self.print_notice(help_notice)
def help_copyright(self):
_print("""copyright
Print copyright notice.""", self.m_stdout)
def help_license(self):
_print("""license
Print license.""", self.m_stdout)
def help_credits(self):
_print("""credits
Print credits information.""", self.m_stdout)
def help_help(self):
_print("""help <cmd>
Print help for command <cmd>.
On the other hand I guess that you already know that, don't you?""", self.m_stdout)
def help_analyze(self):
_print("""analyze
(shorthand - a)
Toggle analyze last exception mode.
The following changes to the debugger behavior apply in analyze mode:
The debugger prompt changes to 'Analyze>'.
'go', 'step', 'next', and 'return' are not allowed.
'thread' does not allow changing the thread focus.
'stack' allows no arguments.
'list' does not accept the '*' (all threads) argument.
'stack', 'list', 'eval', 'exec', 'up', and 'down' operate on the thrown
exception.""", self.m_stdout)
help_a = help_analyze
def help_password(self):
_print("""password <password>
Get or set the channel password.
Communication between the console and the debuggee is always authenticated and
optionally encrypted. The password (a secret known to the console and the
debuggee alone) governs both security methods. The password is never
communicated between the two components on the communication channel.
A password is always required since unsecured communication between the
console and the debuggee might expose your machine to attacks.""", self.m_stdout)
def help_remote(self):
_print("""remote [True | False]
Get or set "allow connections from remote machines" mode.
When set to False:
Newly launched debuggees will listen on localhost only. In this mode, debugger
consoles on remote machines will NOT BE able to see or attach to the debuggee.
When set to True:
Newly launched debuggees will listen on INADDR_ANY. In this mode, debugger
consoles on remote machines will BE able to see and attach to the debuggee.""", self.m_stdout)
def help_trap(self):
_print("""trap [True | False]
Get or set "trap unhandled exceptions" mode.
When set to False:
Debuggee will ignore unhandled exceptions.
When set to True:
Debuggee will pause on unhandled exceptions for inspection.""", self.m_stdout)
def help_synchro(self):
_print("""synchro [True | False]
Get or set the synchronicity mode.
Traditional Python debuggers that use the inspected thread
(usually the main thread) to query or modify the script
name-space have to wait until the script hits a break-point.
Synchronicity allows the debugger to query and modify the
script name-space even if its threads are still running or
blocked in C library code by using special worker threads.
In some rare cases querying or modifying data in
synchronicity can crash the script. For example in some
Linux builds of wxPython querying the state of wx objects
from a thread other than the GUI thread can crash the
script. If this happens or if you want to restrict these
operations to the inspected thread, turn synchronicity off.
Default is True.""", self.m_stdout)
def help_fork(self):
_print("""fork [parent | child] [manual | auto]
Get or set fork handling mode.
Without arguments returns the current mode.
When 'parent' is specified the debugger will continue to debug the original
parent process after a fork.
When 'child' is specified the debugger will switch to debug the forked
child process after a fork.
When 'manual' is specified the debugger will pause before doing a fork.
When 'auto' is specified the debugger will go through the fork without
pausing and will make the forking decision based on the parent/child
setting.
WARNING:
On some POSIX operating systems such as FreeBSD, stepping into the child
fork can result in termination of the child process, since the debugger
uses threading for its operation and on these systems threading and
forking can conflict.
""", self.m_stdout)
def help_stop(self):
_print("""stop
Shutdown the debugged script.""", self.m_stdout)
def help_launch(self):
_print("""launch [-k] <script_name> [<script_args>]
Start script <script_name> and attach to it.
-k Don't change the current working directory. By default the working
directory of the launched script is set to its folder.""", self.m_stdout)
def help_restart(self):
_print("""restart
Restart a script with the same arguments from the last launch.""", self.m_stdout)
def help_attach(self):
_print("""attach [<arg>]
Without an argument, 'attach' prints the scripts available for debugging
on the selected host. To select a host use the 'host' command. A script is
considered available for debugging only if it is using the rpdb2 module or
has been executed by the debugger.
If the debugger is already attached to a script, a special character will
mark that script in the list.
When <arg> is an integer the debugger will try to attach to a script with
that pid.
When <arg> is a string the debugger will try to attach to a script
with that name in the list.""", self.m_stdout)
def help_detach(self):
_print("""detach
Detach from the script the debugger is currently attached to. The detached
script will continue execution.""", self.m_stdout)
def help_break(self):
_print("""break
(shorthand - b)
Request script to break (pause execution as if it hit a breakpoint).
The 'break' command returns immediately, but the break is only established
when an active thread submits to the debugger control. If a thread is
doing a system call or executing C code, this will happen only when
it returns to executing Python code.""", self.m_stdout)
help_b = help_break
def help_bp(self):
_print("""bp [<filename>':'] (<line> | <scope>) [',' <expr>]
Set a breakpoint.
<filename> - either the filename or the module name.
<line> - is the line number to assign the breakpoint to.
<scope> - is a "fully qualified" function name. That is, not only the
function name but also the class name (in case of a member
function), such as MyClass.MyMemberFunction.
<expr> - condition to evaluate in the context of the frame. If it
evaluates to 'True' the break point will break into the debugger.
If <filename> is omitted, the current file is assumed. In this case
the debuggee has to be waiting at break point.
Examples:
bp test_file.py:20
bp test_file.py:MyClass.Foo
bp 304
Type 'help break' for more information on breakpoints and threads.""", self.m_stdout)
def help_be(self):
_print("""be (<id_list> | '*')
Enable breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - Enable all breakpoints.""", self.m_stdout)
def help_bd(self):
_print("""bd (<id_list> | '*')
Disable breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - disable all breakpoints.""", self.m_stdout)
def help_bc(self):
_print("""bc (<id_list> | '*')
Clear (delete) breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - clear all breakpoints.""", self.m_stdout)
def help_bl(self):
_print("""bl
List all breakpoints, sorted by their id.""", self.m_stdout)
def help_load(self):
_print("""load [<filename>]
Load breakpoints.
<filename> - optional breakpoints filename. The filename should not include
a file extension.""", self.m_stdout)
def help_save(self):
_print("""save [<filename>]
Save breakpoints.
<filename> - optional breakpoints filename. The filename should not include
a file extension.""", self.m_stdout)
def help_go(self):
_print("""go [[<filename>':'] (<line> | <scope>)]
(shorthand - g)
Resume execution of a script that is waiting at break point.
If an argument is present, continue execution until that argument is reached.
<filename> - the file name, which is basically the script's name without
the '.py' extension.
<line> - is the line number to assign the breakpoint to.
<scope> - is a "fully qualified" function name. That is, not only the
function name but also the class name (in case of a member
function), such as MyClass.MyMemberFunction.""", self.m_stdout)
help_g = help_go
def help_exit(self):
_print("""exit
Exit the debugger. If the debugger is attached to a script, the debugger
will attempt to detach from the script first.""", self.m_stdout)
help_EOF = help_exit
def help_host(self):
_print("""host [<arg>]
Without an argument, 'host' prints the current selected host.
With an argument <arg>, 'host' attempts to resolve <arg> to a known IP
address or a domain name. If it is successful, that host will become the
selected host.
The default selected host is the local host.
Subsequent 'attach' commands will be done on the selected host.
Type 'help attach' for more information.""", self.m_stdout)
def help_stack(self):
_print("""stack [<tid> | '*']
(shorthand - k)
Without an argument, 'stack' prints the stack trace of the focused thread.
If the thread is waiting at break point a special character will mark the
focused frame.
<tid> - print the stack of thread <tid>
'*' - print the stacks of all active threads.
Type 'help break' for more information on breakpoints and threads.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_k = help_stack
def help_list(self):
_print("""list [<file_name>:][<line_no> | '+' | '-' | '^' | '*'] [',' <nlines>]
(shorthand - l)
Without an argument, 'list' prints the source lines around the current line
of the focused thread in the focused frame. A special character sequence will
mark the current line according to the event:
'C>' - call - A function is called.
'L>' - line - The interpreter is about to execute a new line of code.
'R>' - return - A function is about to return.
'E>' - exception - An exception has been thrown.
'*>' - running - The thread is running.
If a breakpoint is assigned to a line, that line will be marked with:
'B' - if the breakpoint is enabled
'D' - if the breakpoint is disabled
<file_name> - List source from filename
<line_no> - Print the source lines around that line number in the same file
of the current line.
'+' - Print the next lines in the file.
'-' - Print the previous lines in the file.
'^' - Print the entire file.
'*' - Print the source lines for each of the active threads.
<nlines> - Print <nlines> lines of source
Type 'help break' for more information on breakpoints and threads.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_l = help_list
def help_thread(self):
_print("""thread [<no> | <tid>]
(shorthand - t)
Without an argument, 'thread' prints the list of known active threads, with
their corresponding state, which can be either 'running' or
'waiting at break point'. A special character will mark the focused thread.
With an argument <tid>, 'thread' will attempt to set the debugger focus to
the thread of that tid.
With an argument <no>, 'thread' will attempt to set the debugger focus to
the thread of that order in the thread list.
Type 'help break' for more information on breakpoints and threads.""", self.m_stdout)
help_t = help_thread
def help_jump(self):
_print("""jump <lineno>
(shorthand - j)
Jump to line <lineno> in the current scope.""", self.m_stdout)
help_j = help_jump
def help_next(self):
_print("""next
(shorthand - n)
Continue execution until the next line in the current function
is reached or it returns.""", self.m_stdout)
help_n = help_next
def help_step(self):
_print("""step
(shorthand - s)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function).""", self.m_stdout)
help_s = help_step
def help_return(self):
_print("""next
(shorthand - r)
Continue execution until the current function returns.""", self.m_stdout)
help_r = help_return
def help_up(self):
_print("""up
move the debugger focus one frame up the stack of the debugged thread
(closer to the current, most recently executed frame). Evaluation of
expressions or execution of statements will be done at the local and global
name spaces of the focused frame.
Type 'help eval' for more information on evaluation of expressions.
Type 'help exec' for more information on execution of statements.""", self.m_stdout)
def help_down(self):
_print("""down
move the debugger focus one frame down the stack of the debugged thread
(further away from the current, most recently executed frame). Evaluation of
expressions or execution of statements will be done at the local and global
name spaces of the focused frame.
Type 'help eval' for more information on evaluation of expressions.
Type 'help exec' for more information on execution of statements.""", self.m_stdout)
def help_eval(self):
_print("""eval <expr>
(shorthand - v)
Evaluate the python expression <expr> under the global and local name spaces
of the currently focused frame.
Example:
'eval locals()' - will display the dictionary of the local variables.
IMPORTANT: Any changes to the global name space will be discarded unless the
focused stack frame is the top most frame.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_v = help_eval
def help_exec(self):
_print("""exec <stmt>
(shorthand - x)
Execute the python suite <stmt> under the global and local name spaces
of the currently focused frame.
Example:
'exec i += 1'
IMPORTANT: Any changes to the global name space will be discarded unless the
focused stack frame is the top most frame.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_x = help_exec
def help_encoding(self):
_print("""encoding [<encoding> [, raw]]
Set the source encoding for the exec and eval commands.
Without an argument returns the current encoding.
The specified encoding can be either 'auto' or any encoding accepted
by the codecs module. If 'auto' is specified, the source encoding of
the active scope will be used, which is utf-8 by default.
The default encoding value is 'auto'.
If 'raw' is specified, strings returned by the eval command
will represent non-ASCII characters as escape sequences.""", self.m_stdout)
def help_env(self):
_print("""env [-d key | key = value]
Set the environment variables mapping. This mapping is used
when a new script is launched to modify its environment.
Example for a mapping on Windows:
env Path = %Path%;c:\\mydir
Example for a mapping on Linux:
env PATH = $PATH:~/mydir
To delete the mapping for PATH
env -d PATH
Without an argument returns the current list of mappings.
Note that the mapping will be evaluated and used to modify
the environment after the debugger engine at the debuggee
has imported the modules it requires. The order in which the
mappings will be evaluated and applied is:
last set, last evaluated.""", self.m_stdout)
#
# ---------------------------------------- Replacement Functions ------------------------------------
#
def rpdb2_import_wrapper(*args, **kwargs):
if len(args) > 0:
name = args[0]
elif 'name' in kwargs:
name = kwargs['name']
else:
return g_import(*args, **kwargs)
if name in sys.modules:
return g_import(*args, **kwargs)
#
# rpdb2 avoids stepping through this
# function (rpdb2_import_wrapper) to
# prevent confusion when stepping into
# an import statement.
#
m = g_import(*args, **kwargs)
if name != 'gtk':
return m
try:
m.gdk.threads_init()
return m
except:
pass
try:
m.threads_init()
return m
except:
pass
return m
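#
# The special case above for the 'gtk' module calls threads_init() on
# the debuggee's behalf, presumably so that GTK releases the GIL around
# its event loop and the debugger's worker threads keep running.
#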
g_import = None
if __name__ == 'rpdb2' and g_builtins_module.__import__ != rpdb2_import_wrapper:
g_import = g_builtins_module.__import__
g_builtins_module.__import__ = rpdb2_import_wrapper
def __find_eval_exec_frame_in_stack():
f = sys._getframe(0)
while f != None:
filename = f.f_code.co_filename
name = f.f_code.co_name
if DEBUGGER_FILENAME in filename and name in ['_evaluate', '_execute'] and 'redirect_exc_info' in f.f_locals:
return f
f = f.f_back
return None
def __exc_info():
f = __find_eval_exec_frame_in_stack()
if f == None:
return g_sys_exc_info()
try:
frame_index = f.f_locals['frame_index']
fException = f.f_locals['fException']
e = g_debugger.get_exception(frame_index, fException)
exc_info = (e['type'], e['value'], e['traceback'])
return exc_info
except:
return g_sys_exc_info()
g_sys_exc_info = None
if __name__ == 'rpdb2' and 'exc_info' in dir(sys) and sys.exc_info != __exc_info:
g_sys_exc_info = sys.exc_info
sys.exc_info = __exc_info
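#
# With this replacement installed, sys.exc_info() called from code that
# the console evaluates or executes ('eval'/'exec' commands) reports the
# exception of the frame being analyzed in the debuggee rather than
# whatever exception the debugger machinery itself last handled.
#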
def __setrecursionlimit(rl):
global g_recursionlimit
print_debug('rl = %d' % rl)
g_recursionlimit = max(rl, 64)
rl = g_recursionlimit
if sys.version_info[:2] == (2, 6):
rl *= 3
return g_sys_setrecursionlimit(rl + 64)
g_sys_setrecursionlimit = None
if __name__ == 'rpdb2' and 'setrecursionlimit' in dir(sys) and sys.setrecursionlimit != __setrecursionlimit:
g_sys_setrecursionlimit = sys.setrecursionlimit
sys.setrecursionlimit = __setrecursionlimit
__setrecursionlimit(sys.getrecursionlimit())
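#
# The recursion-limit wrapper above pads the requested limit (and
# triples it on Python 2.6), presumably to leave headroom for the extra
# stack frames the debugger's tracing adds to the debuggee.
#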
def __find_debugger_frame():
frame = None
f = sys._getframe(0)
while f != None:
filename = f.f_code.co_filename
name = f.f_code.co_name
if DEBUGGER_FILENAME in filename and (name.startswith('trace_dispatch') or name == 'profile'):
frame = f
f = f.f_back
return frame
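#
# __find_debugger_frame() returns the outermost debugger tracing or
# profiling frame on the current stack, if any. signal_handler() below
# uses it to decide whether a signal arrived while debugger code was
# active and must therefore be deferred.
#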
class CSignalHandler:
def __del__(self):
while len(g_signals_pending) != 0:
(handler, signum, frameobj) = g_signals_pending.pop(0)
print_debug('Handling pending signal: %s, %s' % (repr(signum), repr(frameobj)))
try:
handler(signum, frameobj)
except:
#
# Cannot raise from inside a destructor. Report that the handler
# exception will be ignored.
#
(t, v, tb) = sys.exc_info()
_t = safe_repr(t)
if _t.startswith("<type '"):
_t = _t.split("'")[1]
event = CEventSignalException(signum, '%s: %s' % (_t, safe_repr(v)))
g_debugger.m_event_dispatcher.fire_event(event)
def signal_handler(signum, frameobj):
frame = __find_debugger_frame()
if frame == None:
#
# A debugger tracing frame was not found in the stack.
# This means that the handler can be run without risk
# of state corruption.
#
handler = signal.getsignal(signum)
return handler(signum, frameobj)
if frame.f_code.co_name == 'profile' and frame.f_locals['event'] != 'return':
#
# The signal was caught inside the profile hook but not while
# executing debugger code. Call the handler, but in case of an
# exception schedule the debugger to re-enable the
# profile hook.
#
try:
handler = signal.getsignal(signum)
return handler(signum, frameobj)
except:
ctx = g_debugger.get_ctx(thread.get_ident())
ctx.set_tracers(fsignal_exception = True)
raise
#
# Set the handler to be run when the debugger is about
# to return from the tracing code.
#
print_debug('Intercepted signal: %s, %s' % (repr(signum), repr(frameobj)))
f = frameobj
while f != None:
if f == frame:
frameobj = frame.f_back
break
f = f.f_back
handler = signal.getsignal(signum)
g_signals_pending.append((handler, signum, frameobj))
if not 'signal_handler' in frame.f_locals:
frame.f_locals.update({'signal_handler': CSignalHandler()})
event = CEventSignalIntercepted(signum)
g_debugger.m_event_dispatcher.fire_event(event)
if signum == signal.SIGINT and g_debugger.is_waiting_for_attach():
g_debugger.set_request_go_timer(0)
def __getsignal(signum):
handler = g_signal_handlers.get(signum, g_signal_getsignal(signum))
return handler
g_signal_getsignal = None
if __name__ == 'rpdb2' and 'getsignal' in dir(signal) and signal.getsignal != __getsignal:
g_signal_getsignal = signal.getsignal
signal.getsignal = __getsignal
def __signal(signum, handler):
old_handler = __getsignal(signum)
if handler in [signal.SIG_IGN, signal.SIG_DFL]:
g_signal_signal(signum, handler)
return old_handler
g_signal_signal(signum, signal_handler)
g_signal_handlers[signum] = handler
return old_handler
g_signal_signal = None
if __name__ == 'rpdb2' and 'signal' in dir(signal) and signal.signal != __signal:
g_signal_signal = signal.signal
signal.signal = __signal
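#
# Together, __signal() and __getsignal() interpose on the signal module:
# the script's handler is remembered in g_signal_handlers while
# signal_handler() is installed as the real handler (SIG_IGN and SIG_DFL
# are passed through unchanged), and __getsignal() hands the remembered
# handler back so the wrapping stays invisible to the debuggee.
#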
"""
def __setprofile(foo):
global g_profile
print_debug('*** setprofile to %s' % repr(foo))
traceback.print_stack(file = sys.__stderr__)
if thread_get_name(current_thread()) == 'MainThread':
g_profile = foo
g_sys_setprofile(foo)
g_sys_setprofile = None
if __name__ == 'rpdb2' and sys.setprofile != __setprofile:
g_sys_setprofile = sys.setprofile
sys.setprofile = __setprofile
"""
def __fork():
global g_forktid
if not g_fignorefork:
g_forktid = setbreak()
#
# os.fork() has been called.
#
# You can choose if you would like the debugger
# to continue with the parent or child fork with
# the 'fork' console command.
#
# For example: 'fork child' or 'fork parent'
# Type: 'help fork' for more information.
#
# WARNING:
# On some POSIX operating systems, such as FreeBSD,
# stepping into the child fork can result in
# termination of the child process.
#
# *** RPDB2 SAYS: Read the entire comment! ***
#
return g_os_fork()
g_os_fork = None
if __name__ == 'rpdb2' and 'fork' in dir(os) and os.fork != __fork:
g_os_fork = os.fork
os.fork = __fork
def __exit(n):
global g_fos_exit
if type(n) == int:
g_fos_exit = (setbreak() != None)
#
# os._exit(n) has been called.
#
# Stepping on from this point will result
# in program termination.
#
return g_os_exit(n)
g_os_exit = None
if __name__ == 'rpdb2' and '_exit' in dir(os) and os._exit != __exit:
g_os_exit = os._exit
os._exit = __exit
def __close(fd):
global g_fos_exit
try:
if fd == g_server.m_server.socket._sock.fileno():
g_fos_exit = (setbreak() != None)
except:
pass
#
# os.close(fd) has been called by the debugged script to close
# the debugger communication channel.
#
# This can normally happen if it is trying to spawn a new process
# in its place.
#
# Stepping on from this point will result in termination of the
# debugging session.
#
return g_os_close(fd)
g_os_close = None
if __name__ == 'rpdb2' and 'close' in dir(os) and os.close != __close:
g_os_close = os.close
os.close = __close
def __dup2(fd, fd2):
global g_fos_exit
try:
if fd2 == g_server.m_server.socket._sock.fileno():
g_fos_exit = (setbreak() != None)
except:
pass
#
# os.dup2(fd, fd2) has been called by the debugged script to close
# the debugger communication channel.
#
# This can normally happen if it is trying to spawn a new process
# in its place.
#
# Stepping on from this point will result in termination of the
# debugging session.
#
return g_os_dup2(fd, fd2)
g_os_dup2 = None
if __name__ == 'rpdb2' and 'dup2' in dir(os) and os.dup2 != __dup2:
g_os_dup2 = os.dup2
os.dup2 = __dup2
def __execv(path, args):
global g_exectid
if os.path.isfile(path) and not g_fignorefork:
g_exectid = setbreak()
#
# os.execv() has been called.
#
# Stepping on from this point will result
# in termination of the debug session if
# the exec operation completes successfully.
#
return g_os_execv(path, args)
g_os_execv = None
if __name__ == 'rpdb2' and 'execv' in dir(os) and os.execv != __execv:
g_os_execv = os.execv
os.execv = __execv
def __execve(path, args, env):
global g_exectid
if os.path.isfile(path) and not g_fignorefork:
g_exectid = setbreak()
#
# os.execve() has been called.
#
# Stepping on from this point will result
# in termination of the debug session if
# the exec operation completes successfully.
#
return g_os_execve(path, args, env)
g_os_execve = None
if __name__ == 'rpdb2' and 'execve' in dir(os) and os.execve != __execve:
g_os_execve = os.execve
os.execve = __execve
def __excepthook(type, value, traceback, next_excepthook, index):
if index + 1 < len(g_excepthooks):
return next_excepthook(type, value, traceback)
if traceback.tb_frame.f_back == None:
return next_excepthook(type, value, traceback)
if not g_debugger.m_ftrap:
return next_excepthook(type, value, traceback)
settrace()
ctx = g_debugger.get_ctx(thread.get_ident())
ctx.m_fUnhandledException = True
setbreak()
#
# Debuggee breaks (pauses) here
# on unhandled exceptions.
# Use analyze mode for post mortem.
# Type 'help analyze' for more information.
#
return next_excepthook(type, value, traceback)
g_excepthooks = []
g_excepthook = None
#
# Set the debugger hook for unhandled exceptions. It only kicks in on
# unhandled exceptions that are declared unhandled in the middle of the
# stack as in wxPython. Normally unhandled exceptions are trapped at the
# last stack frame by another mechanism.
#
# This mechanism is designed to work even if the excepthook is overwritten
# by the debugged script.
#
def set_excepthook():
global g_excepthook
if len(g_excepthooks) >= 4:
#
# Give up. We have been over-written 4 times already.
#
return
next_excepthook = sys.excepthook
index = len(g_excepthooks)
eh = lambda type, value, traceback: __excepthook(type, value, traceback, next_excepthook, index)
g_excepthooks.append(eh)
g_excepthook = eh
sys.excepthook = eh
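#
# Each call to set_excepthook() wraps whatever excepthook is current at
# that moment, so repeated overwrites by the debugged script form a chain
# of up to four hooks; __excepthook() only acts when it is the last link
# in the chain (see the index check at its top).
#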
def __function_wrapper(function, args, kwargs):
__settrace(depth = 1)
#
# Debuggee breaks (pauses) here
# on unhandled exceptions.
# Use analyze mode for post mortem.
# Type 'help analyze' for more information.
#
return function(*args, **kwargs)
def __start_new_thread(function, args, kwargs = {}):
return g_thread_start_new_thread(__function_wrapper, (function, args, kwargs))
g_thread_start_new_thread = None
if __name__ == 'rpdb2' and 'start_new_thread' in dir(thread) and thread.start_new_thread != __start_new_thread:
g_thread_start_new_thread = thread.start_new_thread
thread.start_new_thread = __start_new_thread
#
# ---------------------------------------- main ------------------------------------
#
def __settrace(depth = 2):
if g_debugger is None:
return
f = sys._getframe(depth)
g_debugger.settrace(f, f_break_on_init = False)
def __setbreak(depth = 2):
if g_debugger is None:
return
f = sys._getframe(depth)
g_debugger.setbreak(f)
return thread.get_ident()
def __set_temp_breakpoint(path, scopename, lineno):
return g_debugger.m_bp_manager.set_temp_breakpoint(path, scopename, lineno)
def _atexit(fabort = False):
if g_fignore_atexit:
return
print_debug("Entered _atexit() in pid %d" % _getpid())
if g_debugger is None:
return
if not fabort:
g_debugger.stoptrace()
g_debugger.send_event_exit()
time.sleep(1.0)
g_server.shutdown()
g_debugger.shutdown()
if not fabort:
return
if hasattr(os, 'kill') and hasattr(signal, 'SIGKILL'):
os.kill(os.getpid(), signal.SIGKILL)
else:
os.abort()
def my_pickle_import(*args, **kwargs):
name = ''
if len(args) > 0:
name = args[0]
if 'name' in kwargs:
name = kwargs['name']
if name == 'rpdb2':
return
return __import__(*args, **kwargs)
#
# MOD
#
def workaround_import_deadlock():
if is_py3k() and hasattr(pickle, '_Pickler'):
pickle.Pickler = pickle._Pickler
xmlrpclib.loads(XML_DATA)
s = as_bytes("(S'hello'\np0\nS'world'\np1\ntp2\n.")
#s = as_bytes('(S\'\\xb3\\x95\\xf9\\x1d\\x105c\\xc6\\xe2t\\x9a\\xa5_`\\xa59\'\np0\nS"(I0\\nI1\\nS\'5657827\'\\np0\\n(S\'server_info\'\\np1\\n(tI0\\ntp2\\ntp3\\n."\np1\ntp2\n.0000000')
pickle.loads(s)
pickle.__import__ = my_pickle_import
def __start_embedded_debugger(_rpdb2_pwd, fAllowUnencrypted, fAllowRemote, timeout, source_provider, fDebug, depth):
global g_server
global g_debugger
global g_fDebug
global g_initial_cwd
global g_source_provider_aux
_rpdb2_pwd = as_unicode(_rpdb2_pwd)
try:
g_server_lock.acquire()
if g_debugger is not None and timeout == 0:
f = sys._getframe(depth)
g_debugger.settrace(f, f_break_on_init = False)
return
if g_debugger is not None:
f = sys._getframe(depth)
g_debugger.record_client_heartbeat(0, True, False)
g_debugger.setbreak(f)
return
if not is_valid_pwd(_rpdb2_pwd):
raise BadArgument(STR_PASSWORD_BAD)
g_fDebug = fDebug
g_source_provider_aux = source_provider
workaround_import_deadlock()
if (not fAllowUnencrypted) and not is_encryption_supported():
raise EncryptionNotSupported
f = sys._getframe(depth)
filename = calc_frame_path(f)
#
# This is an attempt to address the Python problem of recording only
# relative paths in __file__ members of modules in the following case.
#
if sys.path[0] == '':
try:
g_initial_cwd = [getcwd(), getcwdu()]
except UnicodeDecodeError:
#
# This exception can be raised in py3k (alpha) on nt.
#
g_initial_cwd = [getcwdu()]
atexit.register(_atexit)
g_debugger = CDebuggerEngine(fembedded = True)
g_server = CDebuggeeServer(filename, g_debugger, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote)
g_server.start()
if timeout == 0:
g_debugger.settrace(f, f_break_on_init = False)
return
g_debugger.settrace(f, timeout = timeout)
finally:
g_server_lock.release()
def StartServer(args, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid):
assert(is_unicode(_rpdb2_pwd))
global g_server
global g_debugger
global g_module_main
try:
ExpandedFilename = FindFile(args[0])
_path = g_found_unicode_files.get(ExpandedFilename, ExpandedFilename)
if fchdir:
os.chdir(os.path.dirname(_path))
if ExpandedFilename in g_found_unicode_files:
prefix = os.path.join(getcwdu(), '')
_path = _path.replace(winlower(prefix), '')
except IOError:
_print('File ' + args[0] + ' not found.')
return
print_debug('Starting server with: %s' % ExpandedFilename)
workaround_import_deadlock()
#
# Replace the rpdb2.py directory with the script directory in
# the search path
#
spe = ExpandedFilename
if os.path.islink(ExpandedFilename):
spe = os.path.realpath(ExpandedFilename)
sys.path[0] = os.path.dirname(spe)
encoding = detect_locale()
argv = [as_string(arg, encoding) for arg in args]
sys.argv = argv
atexit.register(_atexit)
g_debugger = CDebuggerEngine()
g_server = CDebuggeeServer(ExpandedFilename, g_debugger, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid)
g_server.start()
try:
g_debugger.m_bp_manager.set_temp_breakpoint(ExpandedFilename, '', 1, fhard = True)
except:
pass
f = sys._getframe(0)
g_debugger.settrace(f, f_break_on_init = False, builtins_hack = ExpandedFilename)
g_module_main = -1
del sys.modules['__main__']
#
# An exception in this line occurs if
# there is a syntax error in the debugged script or if
# there was a problem loading the debugged script.
#
imp.load_source('__main__', _path)
def StartClient(command_line, fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host):
assert(is_unicode(command_line))
assert(_rpdb2_pwd == None or is_unicode(_rpdb2_pwd))
if (not fAllowUnencrypted) and not is_encryption_supported():
_print(STR_ENCRYPTION_SUPPORT_ERROR)
return 2
sm = CSessionManager(_rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
c = CConsole(sm)
c.start()
time.sleep(1.0)
try:
if fAttach:
sm.attach(command_line)
elif command_line != '':
sm.launch(fchdir, command_line)
except (socket.error, CConnectionException):
sm.report_exception(*sys.exc_info())
except CException:
sm.report_exception(*sys.exc_info())
except:
sm.report_exception(*sys.exc_info())
print_debug_exception(True)
c.join()
sm.shutdown()
def PrintUsage(fExtended = False):
scriptName = os.path.basename(sys.argv[0])
_print(""" %(rpdb)s [options] [<script-name> [<script-args>...]]
%(rpdb)s uses the client-server model where the debugger UI/console is
the client and the debugged script is the server (also called debuggee).
The client and the server are separate processes and communicate over
sockets.
Example: The following command starts the debugger UI/console and then
launches and attaches to the specified script:
%(rpdb)s some_script.py
Options can be a combination of the following:
-h, --help Print this help.
-d, --debuggee Start the debugged script (server) and wait for a
debugger console (client) to attach.
-a, --attach Start the debugger console (client) and attach to the
specified debugged script (server).
-o, --host= Specify host (or IP address) for remote connections.
-r, --remote Allow debuggees to accept connections from remote machines.
-e, --encrypt Force encrypted socket communication.
-p, --pwd= Specify password for socket communication.
This flag is available only on Windows. On other
systems the password will be queried interactively
if it is needed.
-s, --screen Use the Unix screen utility when starting the debuggee.
Note that the debugger should be started as follows:
screen rpdb2 -s [options] [<script-name> [<script-args>...]]
-c, --chdir Change the working directory to that of the launched
script.
-v, --version Print version information.
--debug Debug prints.
Note that each option is available in short form (example -e) and in a
long form (example --encrypt).
Options that end with '=' accept an argument that should follow without
a space. For example to specify 192.168.0.10 as host use the following
option:
long form: --host=192.168.0.10
short form: -o192.168.0.10
""" % {"rpdb": scriptName})
if not fExtended:
return
_print(__doc__)
def main(StartClient_func = StartClient, version = RPDB_TITLE):
global g_fScreen
global g_fDebug
global g_fFirewallTest
create_rpdb_settings_folder()
encoding = detect_locale()
argv = [as_unicode(arg, encoding) for arg in sys.argv]
try:
options, _rpdb2_args = getopt.getopt(
argv[1:],
'hdao:rtep:scv',
['help', 'debugee', 'debuggee', 'attach', 'host=', 'remote', 'plaintext', 'encrypt', 'pwd=', 'rid=', 'screen', 'chdir', 'base64=', 'nofwtest', 'version', 'debug']
)
except getopt.GetoptError:
PrintUsage()
return 2
fWrap = False
fAttach = False
fSpawn = False
fStart = False
encoded_path = None
secret = None
host = None
_rpdb2_pwd = None
fchdir = False
fAllowRemote = False
fAllowUnencrypted = True
for o, a in options:
if o in ['-h', '--help']:
PrintUsage()
return 0
if o in ['-v', '--version']:
_print(version)
return 0
if o in ['--debug']:
g_fDebug = True
if o in ['-d', '--debugee', '--debuggee']:
fWrap = True
if o in ['-a', '--attach']:
fAttach = True
if o in ['-o', '--host']:
host = a
if o in ['-r', '--remote']:
fAllowRemote = True
if o in ['-t', '--plaintext']:
fAllowUnencrypted = True
if o in ['-e', '--encrypt']:
fAllowUnencrypted = False
if o in ['-p', '--pwd']:
_rpdb2_pwd = a
if o in ['--rid']:
secret = a
if o in ['-s', '--screen']:
g_fScreen = True
if o in ['-c', '--chdir']:
fchdir = True
if o in ['--base64']:
encoded_path = a
if o in ['--nofwtest']:
g_fFirewallTest = False
arg = None
argv = None
options = None
o = None
a = None
if (_rpdb2_pwd is not None) and (os.name != 'nt'):
_print(STR_PASSWORD_NOT_SUPPORTED)
return 2
if _rpdb2_pwd is not None and not is_valid_pwd(_rpdb2_pwd):
_print(STR_PASSWORD_BAD)
return 2
if fWrap and (len(_rpdb2_args) == 0):
_print("--debuggee option requires a script name with optional <script-arg> arguments")
return 2
if fWrap and fAttach:
_print("--debuggee and --attach can not be used together.")
return 2
if fAttach and (len(_rpdb2_args) == 0):
_print("--attach option requires a script name to attach to.")
return 2
if fAttach and (len(_rpdb2_args) > 1):
_print("--attach option does not accept <script-arg> arguments.")
return 2
if fAttach and fAllowRemote:
_print("--attach and --remote can not be used together.")
return 2
if (host is not None) and not fAttach:
_print("--host can only be used together with --attach.")
return 2
if host is None:
host = LOCALHOST
fSpawn = (len(_rpdb2_args) != 0) and (not fWrap) and (not fAttach)
fStart = (len(_rpdb2_args) == 0)
if fchdir and not (fWrap or fSpawn):
_print("-c can only be used when launching or starting a script from command line.")
return 2
assert (fWrap + fAttach + fSpawn + fStart) == 1
if fAttach and (os.name == POSIX):
try:
int(_rpdb2_args[0])
_rpdb2_pwd = read_pwd_file(_rpdb2_args[0])
delete_pwd_file(_rpdb2_args[0])
except (ValueError, IOError):
pass
if (secret is not None) and (os.name == POSIX):
_rpdb2_pwd = read_pwd_file(secret)
if (fWrap or fAttach) and not is_valid_pwd(_rpdb2_pwd):
_print(STR_PASSWORD_MUST_BE_SET)
while True:
_rpdb2_pwd = _raw_input(STR_PASSWORD_INPUT)
if is_valid_pwd(_rpdb2_pwd):
break
_print(STR_PASSWORD_BAD)
_print(STR_PASSWORD_CONFIRM)
if fWrap or fSpawn:
try:
if encoded_path != None:
_b = as_bytes(encoded_path).translate(g_safe_base64_from)
_u = base64.decodestring(_b)
_path = as_unicode(_u)
_rpdb2_args[0] = _path
FindFile(_rpdb2_args[0])
except IOError:
_print(STR_FILE_NOT_FOUND % _rpdb2_args[0])
return 2
if fWrap:
if (not fAllowUnencrypted) and not is_encryption_supported():
_print(STR_ENCRYPTION_SUPPORT_ERROR)
return 2
StartServer(_rpdb2_args, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, secret)
elif fAttach:
StartClient_func(_rpdb2_args[0], fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
elif fStart:
StartClient_func(as_unicode(''), fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
else:
if len(_rpdb2_args) == 0:
_rpdb2_args = ''
else:
_rpdb2_args = '"' + '" "'.join(_rpdb2_args) + '"'
StartClient_func(_rpdb2_args, fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
return 0
if __name__ == '__main__':
import rpdb2
#
# Debuggee breaks (pauses) here
# on unhandled exceptions.
# Use analyze mode for post mortem.
# Type 'help analyze' for more information.
#
ret = rpdb2.main()
#
# Debuggee breaks (pauses) here
# before program termination.
#
# You can step to debug any exit handlers.
#
rpdb2.setbreak()
|
serviceSkeleton.py | import socket
import threading
# Starts a server on the specified port and host.
# Loops forever, waiting for connections.
# When it receives a connection, it calls serviceFunc in a separate thread.
# serviceFunc must accept the connection socket and address object as arguments.
def startService(port,serviceFunc,host="127.0.0.1"):
# Try to bind to the port on any of the available interfaces.
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.bind(sa)
s.listen(1)
except socket.error as msg:
s.close()
s = None
continue
break
if s is None:
print('could not open socket')
return None
while True:
conn, addr = s.accept()
threading.Thread(target=serviceFunc,args=(conn,addr)).start()
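# A minimal usage sketch (hypothetical echo service, not part of this module):
# the service function receives the connection socket and address, handles one
# client, and closes the socket when done.
#
# def echoService(conn, addr):
#     data = conn.recv(1024)
#     while data:
#         conn.sendall(data)
#         data = conn.recv(1024)
#     conn.close()
#
# startService(8888, echoService)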
|
service.py | # -*- coding: utf-8 -*-
from datetime import datetime
import io
import time
import threading
from wsgiref.validate import validator
from wsgiref.simple_server import make_server
EXCHANGE_FILE = "./exchange.dat"
def update_exchange_file():
"""
Writes the current date and time every 10 seconds into the exchange file.
The file is created if it does not exist.
"""
print("Will update to exchange file")
while True:
with io.open(EXCHANGE_FILE, "w") as f:
f.write(datetime.now().isoformat())
time.sleep(10)
def simple_app(environ, start_response):
"""
Read the content of the exchange file and return it.
"""
start_response('200 OK', [('Content-type', 'text/plain')])
with io.open(EXCHANGE_FILE) as f:
return [f.read().encode('utf-8')]
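# Note: wsgiref.validate.validator is imported above but unused; as an
# optional sketch, wrapping the app with it checks WSGI compliance during
# development:
#
# checked_app = validator(simple_app)
# httpd = make_server('', 8080, checked_app)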
if __name__ == '__main__':
t = threading.Thread(target=update_exchange_file)
t.start()
httpd = make_server('', 8080, simple_app)
print("Listening on port 8080....")
try:
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
t.join(timeout=1) |
main.py | # -*- coding: utf-8 -*-
# Created by HRex on 2020/12/22
import re
import sys
import threading
import time
from time import sleep
from PyQt5.QtCore import QTime
from PyQt5.QtWidgets import QApplication, QMainWindow
import serial
import SmartFan
# Global Variable
portx = 0 # port number
bps = 0 # baud rate
num_data = 0 # data bits
num_stop = 0 # stop bits
num_parity_check = 0 # parity
timex = 0.1 # timeout setting
SCS = None
ser = None
SERIAL_STATUS = 0
# Variable ends
# Status Variable
control_stage = 0
time_stage = "9999"
min_stage = "00"
sec_stage = "00"
fan_stage = 0
last_fan_stage = 0
# Variable ends
def prog_exit():
# Quit the existing application instance (constructing a second
# QApplication here would fail and would not stop the running app).
QApplication.instance().quit()
def high_speed_set():
global control_stage, time_stage
control_stage = 3
data_send()
def mid_speed_set():
global control_stage, time_stage
control_stage = 2
data_send()
def low_speed_set():
global control_stage, time_stage
control_stage = 1
data_send()
def stop_speed_set():
global control_stage, time_stage
control_stage = 0
data_send()
def stop_time_set():
global time_stage
time = ui.timeEdit.time().toString()
time_stage = str(time[3]+time[4]+time[6]+time[7])
data_send()
def stop_time_cancle():
global time_stage
time_stage = "9999"
data_send()
def data_from_c51_set(data):
global min_stage,sec_stage,fan_stage,time_stage,last_fan_stage
pattern = r"(?<=\[).*?(?=\])"
find_data = re.findall(pattern, data)
current_stage = int(find_data[-1])
if int(current_stage / 10) <= 6000:
fan_stage = int(current_stage % 10)
current_stage = int(current_stage / 10)
min_stage = int(current_stage / 100)
sec_stage = int(current_stage % 100)
if min_stage < 10:
min_stage = "0" + str(min_stage)
if sec_stage < 10:
sec_stage = "0" +str(sec_stage)
else:
fan_stage = int(current_stage % 10)
min_stage = "99"
sec_stage = "99"
time_stage = str(min_stage) + str(sec_stage)
if fan_stage == 0:
ui.label_13.setText("STOP")
elif fan_stage == 1:
ui.label_13.setText("LOW")
elif fan_stage == 2:
ui.label_13.setText("MID")
elif fan_stage == 3:
ui.label_13.setText("HIGH")
else:
ui.label_13.setText("DATA ERROR!")
if last_fan_stage != fan_stage:
fan_stage_change_from_c51_set()
last_fan_stage = fan_stage # save the current state
def fan_stage_set():
global control_stage
file = open("fan_stage.txt", "a", encoding='utf-8')
if control_stage == 0:
line = "["+str(time.ctime())+"]" + "上位机控制" + "FAN_STAGE: " + "STOP!" + "\n"
elif control_stage == 1:
line = "["+str(time.ctime())+"]" + "上位机控制" + "FAN_STAGE: " + "LOW!" + "\n"
elif control_stage == 2:
line = "["+str(time.ctime())+"]" + "上位机控制" + "FAN_STAGE: " + "MID!" + "\n"
elif control_stage == 3:
line = "["+str(time.ctime())+"]" + "上位机控制" + "FAN_STAGE: " + "HIGH!" + "\n"
file.write(line)
file.close()
def fan_stage_change_from_c51_set():
global fan_stage, last_fan_stage
file = open("fan_stage.txt", "a", encoding='utf-8')
if fan_stage == 0:
line = "[" + str(time.ctime()) + "]" + "下位机更改" + "FAN_STAGE: " + "STOP!" + "\n"
elif fan_stage == 1:
line = "[" + str(time.ctime()) + "]" + "下位机更改" + "FAN_STAGE: " + "LOW!" + "\n"
elif fan_stage == 2:
line = "[" + str(time.ctime()) + "]" + "下位机更改" + "FAN_STAGE: " + "MID!" + "\n"
elif fan_stage == 3:
line = "[" + str(time.ctime()) + "]" + "下位机更改" + "FAN_STAGE: " + "HIGH!" + "\n"
file.write(line)
file.close()
def data_send():
global ser, SERIAL_STATUS
if SERIAL_STATUS == 1:
try:
SCS = "[" + time_stage + str(control_stage) + "]"
# WRITE
result = ser.write(SCS.encode("utf-8"))
print("写总字节数:", result, SCS)
ui.label_11.setText(SCS)
fan_stage_set()
except Exception as e:
ui.label_11.setText("SEND_ERROR!")
else:
ui.label_11.setText("串口未开启!")
def data_received(thread_name):
global ser, SERIAL_STATUS, min_stage, sec_stage
while True:
if SERIAL_STATUS == 1:
try:
if not ser.in_waiting:
ui.label_10.setText("串口接收中...")
if ser.in_waiting:
data_from_c51 = ser.read(ser.in_waiting).decode("utf-8")
sleep(0.02)
data_from_c51_set(data_from_c51)
except Exception as e:
try:
ui.label_10.setText("ERROR!!!!")
except Exception as e:
pass
data_from_c51 = None
else:
try:
ui.label_10.setText("串口未开启")
sleep(0.2)
except Exception as e:
pass
try:
if min_stage == "99":
ui.LED_LIGHT.display("00:00")
else:
ui.LED_LIGHT.display(str(min_stage) + ":" + str(sec_stage))
except Exception as e:
pass
def serial_open():
global ser, SERIAL_STATUS
if SERIAL_STATUS == 0:
try:
# DATA PROCEED
portx = ui.comboBox_2.currentText()
bps = ui.comboBox.currentText()
num_data = ui.comboBox_4.currentText()
if num_data == "5": num_data = serial.FIVEBITS
if num_data == "6": num_data = serial.SIXBITS
if num_data == "7": num_data = serial.SEVENBITS
if num_data == "8": num_data = serial.EIGHTBITS
num_stop = ui.comboBox_3.currentText()
if num_stop == "1": num_stop = serial.STOPBITS_ONE
if num_stop == "1.5": num_stop = serial.STOPBITS_ONE_POINT_FIVE
if num_stop == "2": num_stop = serial.STOPBITS_TWO
num_parity_check = ui.comboBox_5.currentText()
if num_parity_check == "无校验": num_parity_check = serial.PARITY_NONE
if num_parity_check == "奇校验": num_parity_check = serial.PARITY_ODD
if num_parity_check == "偶校验": num_parity_check = serial.PARITY_EVEN
print(portx, bps, num_data, num_stop, num_parity_check)
# OPEN SERIES
ser = serial.Serial(port=portx, baudrate=bps, bytesize=num_data,
parity=num_parity_check, stopbits=num_stop, timeout=timex)
ui.serial_now.setText("已连接上" + portx)
SERIAL_STATUS = 1
ui.pushButton.setText("关闭串口")
except serial.SerialException:
ui.serial_now.setText("连接" + portx + "失败")
SERIAL_STATUS = 0
else:
try:
ser.close()
ui.serial_now.setText("关闭成功")
ui.pushButton.setText("打开串口")
SERIAL_STATUS = 0
except Exception as e:
ui.serial_now.setText("关闭失败")
SERIAL_STATUS = 1
if __name__ == '__main__':
app = QApplication(sys.argv)
MainWindow = QMainWindow()
ui = SmartFan.Ui_mainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
# Thread
Received_Thread = threading.Thread(target=data_received, args=("Received_Thread",))
Received_Thread.daemon = True
Received_Thread.start()
# Set default num
ui.comboBox.setCurrentIndex(3)
ui.comboBox_3.setCurrentIndex(0)
ui.comboBox_4.setCurrentIndex(3)
ui.comboBox_5.setCurrentIndex(0)
ui.LED_LIGHT.setDigitCount(5)
# Set maximum time
ui.timeEdit.setDisplayFormat("mm:ss")
ui.timeEdit.setMinimumTime(QTime(00,00,15))
ui.timeEdit.setMaximumTime(QTime(00,59,59))
# Link to the menu function
ui.actionExit.triggered.connect(prog_exit)
ui.actionOpen.triggered.connect(high_speed_set)
# Link to the button function
ui.pushButton.clicked.connect(serial_open) #Open Serial
ui.pushButton_2.clicked.connect(high_speed_set) #High Speed
ui.pushButton_3.clicked.connect(mid_speed_set) #Mid Speed
ui.pushButton_4.clicked.connect(low_speed_set) #Low Speed
ui.pushButton_5.clicked.connect(stop_speed_set) #Stop
ui.time_setting.clicked.connect(stop_time_set)
ui.time_cancle.clicked.connect(stop_time_cancle)
sys.exit(app.exec_())
|
__init__.py | __all__ = ['ToastNotifier']
# standard library
import logging
from os import path, remove
from time import sleep
from threading import Thread
from pkg_resources import Requirement
from pkg_resources import resource_filename
# 3rd party modules
from win32api import GetModuleHandle
from win32api import PostQuitMessage
from win32gui import CreateWindow
from win32gui import DestroyWindow
from win32gui import LoadIcon
from win32gui import LoadImage
from win32gui import RegisterClass
from win32gui import UnregisterClass
from win32gui import Shell_NotifyIcon
from win32gui import UpdateWindow
from win32gui import WNDCLASS
from win32gui import PumpMessages
try:
from PIL import Image
except ImportError:
Image = None
# Constants
CW_USEDEFAULT = -0x80000000
IDI_APPLICATION = 0x7f00
IMAGE_ICON = 0x1
LR_LOADFROMFILE = 0x16
LR_DEFAULTSIZE = 0x40
NIM_ADD = 0x0
NIM_MODIFY = 0x1
NIM_DELETE = 0x2
NIF_MESSAGE = 0x1
NIF_ICON = 0x2
NIF_TIP = 0x4
NIF_INFO = 0x10
WM_USER = 0x400
WS_OVERLAPPED = 0x0
WS_SYSMENU = 0x80000
PARAM_DESTROY = 0x404
PARAM_CLICKED = 0x405
# Class
class ToastNotifier(object):
'''Create a Windows 10 toast notification.
from: https://github.com/nuno-andre/Windows-10-Toast-Notifications
'''
def __init__(self):
self._thread = None
@staticmethod
def _decorator(func, callback=None):
'''
:param func: callable to decorate
:param callback: callable to run on mouse click within notification window
:return: callable
'''
def inner(*args, **kwargs):
kwargs.update({'callback': callback})
func(*args, **kwargs)
return inner
def _show_toast(
self, title, msg, icon_path, duration, callback_on_click
):
'''Notification settings.
:param title: notification title
:param msg: notification message
:param icon_path: path to the .ico file to customize the notification
:param duration: delay in seconds before notification self-destruction, None for no self-destruction
'''
# Register the window class.
self.wc = WNDCLASS()
self.hinst = self.wc.hInstance = GetModuleHandle(None)
# must be a string
self.wc.lpszClassName = str(f"PythonTaskbar{title}")
# could instead specify simple mapping
self.wc.lpfnWndProc = self._decorator(self.wnd_proc, callback_on_click)
try:
self.classAtom = RegisterClass(self.wc)
except Exception as e:
#logging.error('Some trouble with classAtom (%s)', e)
self.classAtom = self.wc
style = WS_OVERLAPPED | WS_SYSMENU
self.hwnd = CreateWindow(
self.classAtom, 'Taskbar', style, 0, 0, CW_USEDEFAULT, CW_USEDEFAULT, 0, 0, self.hinst, None
)
UpdateWindow(self.hwnd)
# icon
if icon_path is not None:
icon_path = path.realpath(icon_path)
converted = False
if Image is not None and icon_path.split('.')[-1] != 'ico':
img = Image.open(icon_path)
new_name = '.'.join(icon_path.split('.')[:-1]) + '.ico'
img.save(new_name)
icon_path = new_name
converted = True
else:
icon_path = resource_filename(Requirement.parse('win10toast'), 'win10toast/data/python.ico')
converted = False
icon_flags = LR_LOADFROMFILE | LR_DEFAULTSIZE
try:
hicon = LoadImage(self.hinst, icon_path, IMAGE_ICON, 0, 0, icon_flags)
if Image and converted and path.exists(new_name):
remove(new_name)
except Exception as e:
logging.error('Some trouble with the icon (%s): %s', icon_path, e)
hicon = LoadIcon(0, IDI_APPLICATION)
# Taskbar icon
flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
nid = self.hwnd, 0, flags, WM_USER + 20, hicon, 'Tooltip'
Shell_NotifyIcon(NIM_ADD, nid)
Shell_NotifyIcon(
NIM_MODIFY,
(self.hwnd, 0, NIF_INFO, WM_USER + 20, hicon, 'Balloon Tooltip', msg, 200, title)
)
PumpMessages()
# take a rest then destroy
if duration is not None:
sleep(duration)
DestroyWindow(self.hwnd)
UnregisterClass(self.wc.lpszClassName, None)
return None
def show_toast(
self, title='Notification', msg='Here comes the message',
icon_path=None, duration=5, threaded=False, callback_on_click=None
):
'''Notification settings.
:param title: notification title
:param msg: notification message
:param icon_path: path to the .ico file to customize the notification
:param duration: delay in seconds before notification self-destruction, None for no self-destruction
'''
args = title, msg, icon_path, duration, callback_on_click
if not threaded:
self._show_toast(*args)
else:
if self.notification_active():
# We have an active notification, let it finish so we don't spam the user
return False
self._thread = Thread(target=self._show_toast, args=args)
self._thread.start()
return True
def notification_active(self):
'''See if we have an active notification showing'''
if self._thread is not None and self._thread.is_alive():
# We have an active notification, let it finish so we don't spam the user
return True
return False
def wnd_proc(self, hwnd, msg, wparam, lparam, **kwargs):
'''Messages handler method'''
if lparam == PARAM_CLICKED:
# callback goes here
if kwargs.get('callback'):
kwargs.pop('callback')()
self.on_destroy(hwnd, msg, wparam, lparam)
elif lparam == PARAM_DESTROY:
self.on_destroy(hwnd, msg, wparam, lparam)
def on_destroy(self, hwnd, msg, wparam, lparam):
'''Clean after notification ended.'''
nid = self.hwnd, 0
Shell_NotifyIcon(NIM_DELETE, nid)
PostQuitMessage(0)
return None
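# A minimal usage sketch based on the show_toast() signature above:
#
# if __name__ == '__main__':
#     toaster = ToastNotifier()
#     toaster.show_toast('Hello', 'Toast body', duration=5, threaded=True)
#     while toaster.notification_active():
#         sleep(0.1)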
|
test_smbserver.py | #!/usr/bin/env python
# Impacket - Collection of Python classes for working with network protocols.
#
# SECUREAUTH LABS. Copyright (C) 2021 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description:
# Basic unit tests for the SMB Server.
#
# Author:
# Martin Gallo (@martingalloar)
#
import unittest
from time import sleep
from os.path import exists, join
from os import mkdir, rmdir, remove
from multiprocessing import Process
from six import StringIO, BytesIO, b
from impacket.smbserver import isInFileJail, SimpleSMBServer
from impacket.smbconnection import SMBConnection, SessionError, compute_lmhash, compute_nthash
class SMBServerUnitTests(unittest.TestCase):
"""Unit tests for the SMBServer
"""
def test_isInFileJail(self):
"""Test validation of common prefix path.
"""
jail_path = "/tmp/jail_path"
self.assertTrue(isInFileJail(jail_path, "filename"))
self.assertTrue(isInFileJail(jail_path, "./filename"))
self.assertTrue(isInFileJail(jail_path, "../jail_path/filename"))
self.assertFalse(isInFileJail(jail_path, "/filename"))
self.assertFalse(isInFileJail(jail_path, "/tmp/filename"))
self.assertFalse(isInFileJail(jail_path, "../filename"))
self.assertFalse(isInFileJail(jail_path, "../../filename"))
class SimpleSMBServerFuncTests(unittest.TestCase):
"""Pseudo functional tests for the SimpleSMBServer.
These are pseudo functional as we're using our own SMBConnection classes. For a complete functional test
we should (and can) use for example Samba's smbclient or similar.
"""
address = "127.0.0.1"
port = 1445
username = "UserName"
password = "Password"
domain = "DOMAIN"
lmhash = compute_lmhash(password)
nthash = compute_nthash(password)
share_name = "share"
share_path = "jail_dir"
share_file = "jail_file"
share_new_file = "jail_new_file"
share_unjailed_file = "unjailed_new_file"
share_new_content = "some content"
def setUp(self):
"""Creates folders and files required for testing the list, put and get functionality.
"""
if not exists(self.share_path):
mkdir(self.share_path)
for f in [self.share_file, self.share_new_file]:
if not exists(join(self.share_path, f)):
with open(join(self.share_path, f), "a") as fd:
fd.write(self.share_new_content)
def tearDown(self):
"""Removes folders and files used for testing.
"""
for f in [self.share_file, self.share_new_file]:
if exists(join(self.share_path, f)):
remove(join(self.share_path, f))
if exists(self.share_unjailed_file):
remove(self.share_unjailed_file)
if exists(self.share_path):
rmdir(self.share_path)
self.stop_smbserver()
def get_smbserver(self):
return SimpleSMBServer(listenAddress=self.address, listenPort=int(self.port))
def start_smbserver(self, server):
"""Starts the SimpleSMBServer process.
"""
self.server_process = Process(target=server.start)
self.server_process.start()
def stop_smbserver(self):
"""Stops the SimpleSMBServer process and wait for insider threads to join.
"""
self.server_process.terminate()
sleep(0.5)
def test_smbserver_login(self):
"""Test authentication using password and LM/NTHash login.
"""
server = self.get_smbserver()
server.addCredential(self.username, 0, self.lmhash, self.nthash)
self.start_smbserver(server)
# Valid password login
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, self.password)
client.close()
# Valid hash login
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, '', lmhash=self.lmhash, nthash=self.nthash)
client.close()
# Invalid password login
with self.assertRaises(SessionError):
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, 'SomeInvalidPassword')
client.close()
# Invalid username login
with self.assertRaises(SessionError):
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login("InvalidUser", "", lmhash=self.lmhash, nthash=self.nthash)
client.close()
# Invalid hash login
with self.assertRaises(SessionError):
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, "", lmhash=self.nthash, nthash=self.lmhash)
client.close()
def test_smbserver_share_list(self):
"""Test listing files in a shared folder.
"""
server = SimpleSMBServer(listenAddress=self.address, listenPort=int(self.port))
server.addCredential(self.username, 0, self.lmhash, self.nthash)
server.addShare(self.share_name, self.share_path)
self.start_smbserver(server)
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, self.password)
client.listPath(self.share_name, "/")
# Check path traversal in list as in #1066
with self.assertRaises(SessionError):
client.listPath(self.share_name, "../impacket/")
client.close()
def test_smbserver_share_put(self):
"""Test writing files to a shared folder.
"""
server = SimpleSMBServer(listenAddress=self.address, listenPort=int(self.port))
server.addCredential(self.username, 0, self.lmhash, self.nthash)
server.addShare(self.share_name, self.share_path)
self.start_smbserver(server)
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, self.password)
local_file = StringIO(self.share_new_content)
client.putFile(self.share_name, self.share_new_file, local_file.read)
self.assertTrue(exists(join(self.share_path, self.share_new_file)))
with open(join(self.share_path, self.share_new_file), "r") as fd:
self.assertEqual(fd.read(), self.share_new_content)
# Check path traversal in put as in #1066
with self.assertRaises(SessionError):
client.putFile(self.share_name, join("..", self.share_unjailed_file), local_file.read)
self.assertFalse(exists(self.share_unjailed_file))
client.close()
def test_smbserver_share_get(self):
"""Test reading files from a shared folder.
"""
server = SimpleSMBServer(listenAddress=self.address, listenPort=int(self.port))
server.addCredential(self.username, 0, self.lmhash, self.nthash)
server.addShare(self.share_name, self.share_path)
self.start_smbserver(server)
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, self.password)
local_file = BytesIO()
client.getFile(self.share_name, self.share_file, local_file.write)
local_file.seek(0)
self.assertEqual(local_file.read(), b(self.share_new_content))
# Check nonexistent file
with self.assertRaises(SessionError):
client.getFile(self.share_name, "unexistent", local_file.write)
client.close()
if __name__ == "__main__":
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(SMBServerUnitTests))
suite.addTests(loader.loadTestsFromTestCase(SimpleSMBServerFuncTests))
unittest.main(defaultTest='suite')
|
tempobj.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import copy
import glob
import hashlib
import json
import os
import platform
import subprocess
import sys
import tempfile
import stat
import threading
import time
import uuid
from .compat import PY26, pickle, six, builtins, futures
from .config import options
from .errors import NoSuchObject
from . import utils
from .accounts import AliyunAccount
TEMP_ROOT = utils.build_pyodps_dir('tempobjs')
SESSION_KEY = '%d_%s' % (int(time.time()), uuid.uuid4())
CLEANER_THREADS = 100
USER_FILE_RIGHTS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
CLEANUP_SCRIPT_TMPL = u"""
#-*- coding:utf-8 -*-
import os
import sys
import json
try:
os.unlink(os.path.realpath(__file__))
except Exception:
pass
temp_codes = json.loads({odps_info!r})
import_paths = json.loads({import_paths!r})
biz_ids = json.loads({biz_ids!r})
if sys.version_info[0] < 3:
if sys.platform == 'win32':
import_paths = [p.encode('mbcs') for p in import_paths]
else:
import_paths = [p.encode() for p in import_paths]
normed_paths = set(os.path.normcase(os.path.normpath(p)) for p in sys.path)
import_paths = [p for p in import_paths
if os.path.normcase(os.path.normpath(p)) not in normed_paths]
sys.path.extend(import_paths)
from odps import ODPS, tempobj
if os.environ.get('WAIT_CLEANUP') == '1':
tempobj.cleanup_timeout = None
else:
tempobj.cleanup_timeout = 5
tempobj.cleanup_mode = True
tempobj.host_pid = {host_pid}
tempobj.ObjectRepositoryLib.biz_ids = set(biz_ids)
for o_desc in temp_codes:
ODPS(**tempobj.compat_kwargs(o_desc))
os._exit(0)
""".lstrip()
cleanup_mode = False
cleanup_timeout = 0
host_pid = os.getpid()
class ExecutionEnv(object):
def __init__(self, **kwargs):
self.cleaned = False
self.os = os
self.sys = sys
self._g_env = copy.copy(globals())
self.is_windows = 'windows' in platform.platform().lower()
self.pid = os.getpid()
self.os_sep = os.sep
self.executable = sys.executable
self.six = six
import_paths = copy.deepcopy(sys.path)
package_root = os.path.dirname(__file__)
if package_root not in import_paths:
import_paths.append(package_root)
self.import_path_json = utils.to_text(json.dumps(import_paths, ensure_ascii=False))
self.builtins = builtins
self.io = __import__('io', fromlist=[''])
if six.PY3:
self.conv_bytes = (lambda s: s.encode() if isinstance(s, str) else s)
self.conv_unicode = (lambda s: s if isinstance(s, str) else s.decode())
else:
self.conv_bytes = (lambda s: s.encode() if isinstance(s, unicode) else s)
self.conv_unicode = (lambda s: s if isinstance(s, unicode) else s.decode())
self.subprocess = subprocess
self.temp_dir = tempfile.gettempdir()
self.template = CLEANUP_SCRIPT_TMPL
self.file_right = USER_FILE_RIGHTS
self.is_main_process = utils.is_main_process()
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
class TempObject(object):
__slots__ = []
_type = ''
_priority = 0
def __init__(self, *args, **kwargs):
for k, v in zip(self.__slots__, args):
setattr(self, k, v)
for k in self.__slots__:
if hasattr(self, k):
continue
setattr(self, k, kwargs.get(k))
def __hash__(self):
if self.__slots__:
return hash(tuple(getattr(self, k) for k in self.__slots__))
return super(TempObject, self).__hash__()
def __eq__(self, other):
if not isinstance(other, TempObject):
return False
if self._type != other._type:
return False
return all(getattr(self, k) == getattr(other, k) for k in self.__slots__)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
return dict((slot, getattr(self, slot)) for slot in self.__slots__ if hasattr(self, slot))
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
class TempTable(TempObject):
__slots__ = 'table', 'project'
_type = 'Table'
def drop(self, odps):
odps.run_sql('drop table if exists %s' % self.table, project=self.project)
class TempModel(TempObject):
__slots__ = 'model', 'project'
_type = 'OfflineModel'
def drop(self, odps):
try:
odps.delete_offline_model(self.model, self.project)
except NoSuchObject:
pass
class TempFunction(TempObject):
__slots__ = 'function', 'project'
_type = 'Function'
_priority = 1
def drop(self, odps):
try:
odps.delete_function(self.function, self.project)
except NoSuchObject:
pass
class TempResource(TempObject):
__slots__ = 'resource', 'project'
_type = 'Resource'
def drop(self, odps):
try:
odps.delete_resource(self.resource, self.project)
except NoSuchObject:
pass
class TempVolumePartition(TempObject):
__slots__ = 'volume', 'partition', 'project'
_type = 'VolumePartition'
def drop(self, odps):
try:
odps.delete_volume_partition(self.volume, self.partition, self.project)
except NoSuchObject:
pass
class ObjectRepository(object):
def __init__(self, file_name):
self._container = set()
self._file_name = file_name
if file_name and os.path.exists(file_name):
self.load()
def put(self, obj, dump=True):
self._container.add(obj)
if dump:
self.dump()
def cleanup(self, odps, use_threads=True):
cleaned = []
def _cleaner(obj):
try:
obj.drop(odps)
cleaned.append(obj)
except:
pass
if self._container:
if use_threads:
pool = futures.ThreadPoolExecutor(CLEANER_THREADS)
list(pool.map(_cleaner, reversed(list(self._container))))
else:
for o in sorted(list(self._container), key=lambda ro: type(ro)._priority, reverse=True):
_cleaner(o)
for obj in cleaned:
if obj in self._container:
self._container.remove(obj)
if not self._container and self._file_name:
try:
os.unlink(self._file_name)
except OSError:
pass
else:
self.dump()
def dump(self):
if self._file_name is None:
return
with open(self._file_name, 'wb') as outf:
pickle.dump(list(self._container), outf, protocol=0)
outf.close()
os.chmod(self._file_name, USER_FILE_RIGHTS)
def load(self):
try:
with open(self._file_name, 'rb') as inpf:
contents = pickle.load(inpf)
self._container.update(contents)
except (EOFError, OSError):
pass
class ObjectRepositoryLib(dict):
biz_ids = set([options.biz_id, ]) if options.biz_id else set(['default', ])
odps_info = dict()
biz_ids_json = json.dumps(list(biz_ids))
odps_info_json = json.dumps([v for v in six.itervalues(odps_info)])
def __init__(self, *args, **kwargs):
super(ObjectRepositoryLib, self).__init__(*args, **kwargs)
self._env = ExecutionEnv()
def __del__(self):
self._exec_cleanup_script()
@classmethod
def add_biz_id(cls, biz_id):
cls.biz_ids.add(biz_id)
cls.biz_ids_json = json.dumps(list(cls.biz_ids))
@classmethod
def add_odps_info(cls, odps):
odps_key = _gen_repository_key(odps)
cls.odps_info[odps_key] = dict(
access_id=odps.account.access_id, secret_access_key=odps.account.secret_access_key,
project=odps.project, endpoint=odps.endpoint
)
cls.odps_info_json = json.dumps([v for v in six.itervalues(cls.odps_info)])
def _exec_cleanup_script(self):
global cleanup_mode
if not self:
return
env = self._env
if cleanup_mode or not env.is_main_process or env.cleaned:
return
env.cleaned = True
script = env.template.format(import_paths=env.import_path_json, odps_info=self.odps_info_json,
host_pid=env.pid, biz_ids=self.biz_ids_json)
script_name = env.temp_dir + env.os_sep + 'tmp_' + str(env.pid) + '_cleanup_script.py'
script_file = env.io.FileIO(script_name, 'w')
script_file.write(env.conv_bytes(script))
script_file.close()
try:
if env.is_windows:
env.os.chmod(script_name, env.file_right)
else:
env.subprocess.call(['chmod', oct(env.file_right).replace('o', ''), script_name])
except:
pass
kwargs = dict(close_fds=True)
if env.is_windows:
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
kwargs['startupinfo'] = si
env.subprocess.call([env.executable, script_name], **kwargs)
_cleaned_keys = set()
_obj_repos = ObjectRepositoryLib() # this line should be put last due to initialization dependency
atexit.register(_obj_repos._exec_cleanup_script)
def _is_pid_running(pid):
if 'windows' in platform.platform().lower():
task_lines = os.popen('TASKLIST /FI "PID eq {0}" /NH'.format(pid)).read().strip().splitlines()
if not task_lines:
return False
return str(pid) in set(task_lines[0].split())
else:
try:
os.kill(pid, 0)
return True
except OSError:
return False
def clean_objects(odps, biz_ids=None):
odps_key = _gen_repository_key(odps)
files = []
biz_ids = biz_ids or _obj_repos.biz_ids
for biz_id in biz_ids:
files.extend(glob.glob(os.path.join(TEMP_ROOT, biz_id, odps_key, '*.his')))
for fn in files:
repo = ObjectRepository(fn)
repo.cleanup(odps, use_threads=False)
def clean_stored_objects(odps):
global cleanup_timeout, host_pid
if not utils.is_main_process():
return
odps_key = _gen_repository_key(odps)
if odps_key in _cleaned_keys:
return
_cleaned_keys.add(odps_key)
files = []
for biz_id in _obj_repos.biz_ids:
files.extend(glob.glob(os.path.join(TEMP_ROOT, biz_id, odps_key, '*.his')))
def clean_thread():
for fn in files:
writer_pid = int(fn.rsplit('__', 1)[-1].split('.', 1)[0])
# we do not clean up after a running process, unless its pid equals host_pid
if writer_pid != host_pid and _is_pid_running(writer_pid):
continue
repo = ObjectRepository(fn)
repo.cleanup(odps)
thread_obj = threading.Thread(target=clean_thread)
thread_obj.start()
if cleanup_timeout == 0:
return
else:
if cleanup_timeout is not None and cleanup_timeout < 0:
cleanup_timeout = None
thread_obj.join(cleanup_timeout)
def _gen_repository_key(odps):
return hashlib.md5('####'.join([odps.account.access_id, odps.endpoint, odps.project]).encode('utf-8')).hexdigest()
def _put_objects(odps, objs):
odps_key = _gen_repository_key(odps)
biz_id = options.biz_id if options.biz_id else 'default'
ObjectRepositoryLib.add_biz_id(biz_id)
if odps_key not in _obj_repos:
if isinstance(odps.account, AliyunAccount):
ObjectRepositoryLib.add_odps_info(odps)
file_dir = os.path.join(TEMP_ROOT, biz_id, odps_key)
if not os.path.exists(file_dir):
os.makedirs(file_dir)
file_name = os.path.join(file_dir, 'temp_objs_{0}__{1}.his'.format(SESSION_KEY, os.getpid()))
_obj_repos[odps_key] = ObjectRepository(file_name)
[_obj_repos[odps_key].put(o, False) for o in objs]
_obj_repos[odps_key].dump()
def register_temp_table(odps, table, project=None):
if isinstance(table, six.string_types):
table = [table, ]
_put_objects(odps, [TempTable(t, project if project else odps.project) for t in table])
def register_temp_model(odps, model, project=None):
if isinstance(model, six.string_types):
model = [model, ]
_put_objects(odps, [TempModel(m, project if project else odps.project) for m in model])
def register_temp_resource(odps, resource, project=None):
if isinstance(resource, six.string_types):
resource = [resource, ]
_put_objects(odps, [TempResource(r, project if project else odps.project) for r in resource])
def register_temp_function(odps, func, project=None):
if isinstance(func, six.string_types):
func = [func, ]
_put_objects(odps, [TempFunction(f, project if project else odps.project) for f in func])
def register_temp_volume_partition(odps, volume_partition_tuple, project=None):
if isinstance(volume_partition_tuple, tuple):
volume_partition_tuple = [volume_partition_tuple, ]
_put_objects(odps, [TempVolumePartition(v, p, project if project else odps.project)
for v, p in volume_partition_tuple])
def compat_kwargs(kwargs):
if PY26:
new_desc = dict()
for k, v in six.iteritems(kwargs):
new_desc[k.encode('utf-8') if isinstance(k, unicode) else k] = v.encode('utf-8')
return new_desc
else:
return kwargs
|
foozzer.py | #!/usr/bin/env python3
"""
A cross platform fuzzing framework primarily targeting GUI applications.
usage: foozzer.py [-h] [--verbose] [-L] -i I -o O -D D -m MUTATOR -r RUNNER -- RUNNER_ARGS
Options:
-h
--help show help message and exit
-v
--verbose increase output verbosity (can be given multiple times)
-L describe available plugins
-i I input directory
-o O output directory
-D D Dr.Memory bin directory
-m M mutator to use
-r R runner to use
RUNNER_ARGS arguments passed to selected runner module
"""
# Copyright (c) 2020 tick <tickelton@gmail.com>
# SPDX-License-Identifier: ISC
import os
import sys
import shutil
import argparse
import importlib
import pkgutil
import logging
from time import sleep
from subprocess import PIPE, STDOUT, Popen
from threading import Thread
from queue import Queue, Empty
from typing import List, Dict, Tuple, Any, TextIO, Union, Sequence, Optional, Generator
import foozzer.mutators
import foozzer.runners
ON_POSIX = os.name == 'posix'
# binaries
if ON_POSIX:
DRMEMORY_BIN = 'drmemory'
else:
DRMEMORY_BIN = 'drmemory.exe'
DRMEMORY_PARAMS = '-batch'
# misc constants
RUNFILE = 'foozzer.run'
PAUSEFILE = 'foozzer.pause'
LOG_OUTFILE = 'log.txt'
# logging configuration
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
def enqueue_output(out: TextIO, queue: 'Queue[str]') -> None:
"""Helper function for non-blocking reading of child STDOUT."""
for line in iter(out.readline, ''):
queue.put(line)
out.close()
def clear_queue(queue: 'Queue[str]', outfile: TextIO) -> None:
"""Helper function for non-blocking reading of child STDOUT."""
while True:
# non-blocking readline
try:
line = queue.get_nowait()
except Empty:
break
else:
outfile.write(line)
def startall(queue: 'Queue[str]', drmemory_bin: str, target_cmdline: List[str]) -> Tuple['Popen[str]', Thread]:
"""Starts fuzzee child process and thread for STDOUT queue."""
logger.debug(
'startall: drmemory_bin=%s target_cmdline=%s',
drmemory_bin,
target_cmdline
)
drmem = Popen(
[drmemory_bin, DRMEMORY_PARAMS, '--'] + target_cmdline,
stdout=PIPE,
stderr=STDOUT,
bufsize=1,
universal_newlines=True,
close_fds=ON_POSIX
)
qthread = Thread(target=enqueue_output, args=(drmem.stdout, queue))
#qthread.daemon = True
qthread.start()
sleep(1)
if drmem.poll() is not None:
logger.error('SOMETHING WENT WRONG!!')
qthread.join()
sys.exit(1)
return drmem, qthread
def stop_processes(target: str) -> None:
"""Stops fuzzee and Dr.Memrory processes, if running."""
if ON_POSIX:
os.system('pkill {}'.format(target))
else:
os.system('taskkill /t /im {}'.format(target))
sleep(2)
os.system('taskkill /t /im drmemory.exe')
def stopall(qthread: Thread, target: str) -> None:
"""Stops child processes and queue thread."""
stop_processes(target)
sleep(5)
qthread.join()
class ActionListPlugins(argparse.Action):
"""Argparser helper class to show plugins descriptions."""
def __init__(self, option_strings, dest, const, **kwargs):
self._descriptions = const
super().__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string):
for plugin_type in self._descriptions:
print('\n{}:\n'.format(plugin_type))
for k, val in self._descriptions[plugin_type].items():
print(' {} : {}'.format(k, val))
print('')
sys.exit(0)
def iter_namespace(ns_pkg):
"""Helper function for plugin discovery."""
return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")
# TODO: Replace 'Any' with proper type
def discover_plugins(namespc) -> Dict[str, Tuple[str, Any]]:
"""
Discovers mutator and runner plugins.
Retrieves entry points of the modules and descriptions for help texts.
"""
plugins = {}
for finder, name, ispkg in iter_namespace(namespc):
try:
plugins.update(importlib.import_module(name).get_module_info()) # type: ignore
except AttributeError:
# If the module does not provide a get_module_info function
# it is probably an abstract base class or utility library.
# Anyways, since in that case we have no way to determine its
# correct entry point, we just ignore it.
pass
return plugins
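# Judging from how the plugin dicts are consumed below (mutators[name][0] is a
# description string, runners[args.r][1] is the plugin class), a plugin module
# is expected to expose something like this hypothetical sketch:
#
# def get_module_info():
#     return {'my_plugin': ('one-line description', MyPluginClass)}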
def do_parse_args(args, mutators, runners) -> argparse.Namespace:
"""Argument parsing helper function."""
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '-v', action='count', default=0)
parser.add_argument(
'-L',
nargs=0,
action=ActionListPlugins,
help='describe available plugins',
const={
'Mutators': {n: mutators[n][0] for n in mutators},
'Runners': {n: runners[n][0] for n in runners},
}
)
parser.add_argument(
'-i',
required=True,
help='input directory'
)
parser.add_argument(
'-o',
required=True,
help='output directory'
)
parser.add_argument(
'-D',
required=True,
help='Dr.Memory bin directory'
)
parser.add_argument(
'-m',
required=True,
choices = [m for m in mutators],
help='mutator to use'
)
parser.add_argument(
'-r',
required=True,
choices = [m for m in runners],
help='runner to use'
)
parser.add_argument(
'-f',
help='filename as seen by target process'
)
parser.add_argument('runner_args', nargs=argparse.REMAINDER)
return parser.parse_args(args)
def main(args=None) -> None:
"""foozzer.py main function"""
mutators = discover_plugins(foozzer.mutators)
runners = discover_plugins(foozzer.runners)
args = do_parse_args(args, mutators, runners)
if args.verbose == 1:
logger.setLevel(logging.WARNING)
elif args.verbose == 2:
logger.setLevel(logging.INFO)
elif args.verbose > 2:
logger.setLevel(logging.DEBUG)
runner_class = runners[args.r][1]
runner = runner_class(args.runner_args[1:])
target_process = runner.get_process_name()
input_mutator = mutators[args.m][1]
stop_processes(target_process)
queue: 'Queue[str]' = Queue()
drmem, qthread = startall(queue, os.path.join(args.D, DRMEMORY_BIN), runner.get_cmdline())
logger.info('Opening logfile')
log_outfile = open(os.path.join(args.o, LOG_OUTFILE), 'a')
runfile_path = os.path.join(args.o, RUNFILE)
pausefile_path = os.path.join(args.o, PAUSEFILE)
mutator = input_mutator(args.i, args.o)
i = 0
logger.debug('Clearing queue initially')
clear_queue(queue, log_outfile)
stop_processes(target_process)
runner.setup()
log_outfile.flush()
logger.info('MAINLOOP START')
for input_file, state_msg in mutator:
if not os.path.isfile(runfile_path):
logger.info('Stopping due to missing run file: %s', runfile_path)
break
logger.debug('clearing queue')
clear_queue(queue, log_outfile)
logger.debug('checking if Dr.Memory is still running')
if drmem.poll() is not None:
logger.info('RESTARTING Dr.Memory')
stopall(qthread, target_process)
drmem, qthread = startall(
queue,
os.path.join(args.D, DRMEMORY_BIN),
runner.get_cmdline()
)
runner.setup()
if os.path.isfile(pausefile_path):
logger.info('pausing...')
while os.path.isfile(pausefile_path):
sleep(1)
if args.f:
# If a specific filename was requested for the target
# application with the argument '-f', copy the file the
# mutator generated to that destination.
shutil.copyfile(
os.path.join(args.o, input_file),
os.path.join(args.o, args.f)
)
input_file = args.f
logger.debug('Iteration: %s\n', state_msg)
log_outfile.flush()
log_outfile.write('FOOZZER: Iteration: {}\n'.format(state_msg))
log_outfile.flush()
# TODO: make it clear that an unsuccessful run requires a
# reset of the fuzzing process, e.g. by returning
# some meaningful value like 'RUNNER_{FAILURE|RESTART}'...
if not runner.run(input_file):
log_outfile.write('Resetting after Runner error')
logger.warning('Resetting after Runner error')
clear_queue(queue, log_outfile)
if drmem.poll() is None:
drmem.terminate()
stopall(qthread, target_process)
drmem, qthread = startall(
queue,
os.path.join(args.D, DRMEMORY_BIN),
runner.get_cmdline()
)
runner.setup()
i += 1
logger.info('MAINLOOP END')
if drmem.poll() is None:
logger.debug('terminating')
drmem.terminate()
stopall(qthread, target_process)
clear_queue(queue, log_outfile)
log_outfile.flush()
log_outfile.write('FOOZZER: FINISHED')
log_outfile.close()
logger.info('FINISHED')
if __name__ == '__main__':
main()
|
rst.py | """Read ANSYS binary result files *.rst
Used:
/usr/ansys_inc/v150/ansys/customize/include/fdresu.inc
"""
import time
import warnings
import logging
import ctypes
from threading import Thread
import vtk
import numpy as np
import pyvista as pv
from pyansys import _binary_reader, _parser, _reader
from pyansys.elements import valid_types
from pyansys._binary_reader import (cells_with_any_nodes, cells_with_all_nodes)
from pyansys.common import (read_table, parse_header, AnsysBinary,
read_standard_header, two_ints_to_long)
# Create logger
LOG = logging.getLogger(__name__)
LOG.setLevel('DEBUG')
np.seterr(divide='ignore', invalid='ignore')
def merge_two_dicts(x, y):
merged = x.copy() # start with x's keys and values
merged.update(y) # update merged in place with y's keys and values
return merged
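# Example: merge_two_dicts({'a': 1}, {'a': 2, 'b': 3}) == {'a': 2, 'b': 3}.
# Values from y win on key collisions; neither input dict is modified.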
# Pointer information from ansys interface manual
# =============================================================================
# Individual element index table
ELEMENT_INDEX_TABLE_KEYS = ['EMS', 'ENF', 'ENS', 'ENG', 'EGR',
'EEL', 'EPL', 'ECR', 'ETH', 'EUL',
'EFX', 'ELF', 'EMN', 'ECD', 'ENL',
'EHC', 'EPT', 'ESF', 'EDI', 'ETB',
'ECT', 'EXY', 'EBA', 'ESV', 'MNL']
ELEMENT_RESULT_NCOMP = {'ENS': 6,
'EEL': 7,
'EPL': 7,
'ECR': 7,
'ETH': 8,
'ENL': 10,
'EDI': 7}
ELEMENT_INDEX_TABLE_INFO = {
'EMS': 'misc. data',
'ENF': 'nodal forces',
'ENS': 'nodal stresses',
'ENG': 'volume and energies',
'EGR': 'nodal gradients',
'EEL': 'elastic strains',
'EPL': 'plastic strains',
'ECR': 'creep strains',
'ETH': 'thermal strains',
'EUL': 'euler angles',
'EFX': 'nodal fluxes',
'ELF': 'local forces',
'EMN': 'misc. non-sum values',
'ECD': 'element current densities',
'ENL': 'nodal nonlinear data',
'EHC': 'calculated heat generations',
'EPT': 'element temperatures',
'ESF': 'element surface stresses',
'EDI': 'diffusion strains',
'ETB': 'ETABLE items',
'ECT': 'contact data',
'EXY': 'integration point locations',
'EBA': 'back stresses',
'ESV': 'state variables',
'MNL': 'material nonlinear record'
}
SOLUTION_DATA_HEADER_KEYS = ['pv3num', 'nelm', 'nnod', 'mask', 'itime',
'iter', 'ncumit', 'nrf', 'cs_LSC', 'nmast',
'ptrNSL', 'ptrESL', 'ptrRF', 'ptrMST',
'ptrBC', 'rxtrap', 'mode', 'isym', 'kcmplx',
'numdof', 'DOFS', 'DOFS', 'DOFS', 'DOFS',
'DOFS', 'DOFS', 'DOFS', 'DOFS', 'DOFS',
'DOFS', 'DOFS', 'DOFS', 'DOFS', 'DOFS',
'DOFS', 'DOFS', 'DOFS', 'DOFS', 'DOFS',
'DOFS', 'DOFS', 'DOFS', 'DOFS', 'DOFS',
'DOFS', 'DOFS', 'DOFS', 'DOFS', 'DOFS',
'DOFS', 'title', 'title', 'title', 'title',
'title', 'title', 'title', 'title', 'title',
'title', 'title', 'title', 'title', 'title',
'title', 'title', 'title', 'title', 'title',
'title', 'stitle', 'stitle', 'stitle',
'stitle', 'stitle', 'stitle', 'stitle',
'stitle', 'stitle', 'stitle', 'stitle',
'stitle', 'stitle', 'stitle', 'stitle',
'stitle', 'stitle', 'stitle', 'stitle',
'stitle', 'dbmtim', 'dbmdat', 'dbfncl',
'soltim', 'soldat', 'ptrOND', 'ptrOEL',
'nfldof', 'ptrEXA', 'ptrEXT', 'ptrEXAl',
'ptrEXAh', 'ptrEXTl', 'ptrEXTh', 'ptrNSLl',
'ptrNSLh', 'ptrRFl', 'ptrRFh', 'ptrMSTl',
'ptrMSTh', 'ptrBCl', 'ptrBCh', 'ptrTRFl',
'ptrTRFh', 'ptrONDl', 'ptrONDh', 'ptrOELl',
'ptrOELh', 'ptrESLl', 'ptrESLh', 'ptrOSLl',
'ptrOSLh', 'sizeDEAD', 'ptrDEADl', 'ptrDEADh',
'PrinKey','numvdof', 'numadof', '0', '0',
'ptrVSLl','ptrVSLh', 'ptrASLl', 'ptrASLh', '0',
'0', '0', '0', 'numRotCmp', '0',
'ptrRCMl', 'ptrRCMh', 'nNodStr', '0', 'ptrNDSTRl',
'ptrNDSTRh', 'AvailData', 'geomID', 'ptrGEOl', 'ptrGEOh']
SOLUTION_HEADER_KEYS_DP = ['timfrq', 'lfacto', 'lfactn', 'cptime', 'tref',
'tunif', 'tbulk', 'volbase', 'tstep', '__unused',
'accel_x', 'accel_y', 'accel_z', 'omega_v_x', 'omega_v_y',
'omega_v_z', 'omega_a_x', 'omega_a_y', 'omega_a_z', 'omegacg_v_x',
'omegacg_v_y', 'omegacg_v_z', 'omegacg_a_x', 'omegacg_a_y', 'omegacg_a_z',
'cgcent', 'cgcent', 'cgcent', 'fatjack', 'fatjack',
'dval1', 'pCnvVal', #'pCnvVal', 'pCnvVal',
# 'pCnvVal', 'pCnvVal', 'pCnvVal', 'pCnvVal', 'pCnvVal',
# 'pCnvVal', 'pCnvVal', 'pCnvVal', 'pCnvVal',
# 'pCnvVal', 'pCnvVal', 'pCnvVal', 'pCnvVal', 'pCnvVal']
# c timdat, timdat, timdat, timdat, timdat,
# c timdat, timdat, timdat, timdat, timdat, (60)
# c timdat, timdat, timdat, timdat, timdat,
# c timdat, timdat, timdat, timdat, timdat, (70)
# c timdat, timdat, timdat, timdat, timdat,
# c timdat, timdat, timdat, timdat, timdat, (80)
# c timdat, timdat, timdat, timdat, timdat,
# c timdat, timdat, timdat, timdat, timdat, (90)
# c timdat, timdat, timdat, timdat, timdat,
# c timdat, timdat, timdat, timdat, timdat (100)
]
GEOMETRY_HEADER_KEYS = ['__unused', 'maxety', 'maxrl', 'nnod', 'nelm',
'maxcsy', 'ptrETY', 'ptrREL', 'ptrLOC',
'ptrCSY', 'ptrEID', 'maxsec', 'secsiz',
'maxmat', 'matsiz', 'ptrMAS', 'csysiz',
'elmsiz', 'etysiz', 'rlsiz', 'ptrETYl',
'ptrETYh', 'ptrRELl', 'ptrRELh', 'ptrCSYl',
'ptrCSYh', 'ptrLOCl', 'ptrLOCh', 'ptrEIDl',
'ptrEIDh', 'ptrMASl', 'ptrMASh', 'ptrSECl',
'ptrSECh', 'ptrMATl', 'ptrMATh', 'ptrCNTl',
'ptrCNTh', 'ptrNODl', 'ptrNODh', 'ptrELMl',
'ptrELMh', 'Glbnnod', 'ptrGNODl', 'ptrGNODh',
'maxn', 'NodesUpd', 'lenbac', 'maxcomp',
'compsiz', 'ptrCOMPl', 'ptrCOMPh']
RESULT_HEADER_KEYS = ['fun12', 'maxn', 'nnod', 'resmax', 'numdof',
'maxe', 'nelm', 'kan', 'nsets', 'ptrend',
'ptrDSIl', 'ptrTIMl', 'ptrLSPl', 'ptrELMl',
'ptrNODl', 'ptrGEOl', 'ptrCYCl', 'CMSflg',
'csEls', 'units', 'nSector', 'csCord',
'ptrEnd8', 'ptrEnd8', 'fsiflag', 'pmeth',
'noffst', 'eoffst', 'nTrans', 'ptrTRANl',
'PrecKey', 'csNds', 'cpxrst', 'extopt',
'nlgeom', 'AvailData', 'mmass', 'kPerturb',
'XfemKey', 'rstsprs', 'ptrDSIh', 'ptrTIMh',
'ptrLSPh', 'ptrCYCh', 'ptrELMh', 'ptrNODh',
'ptrGEOh', 'ptrTRANh', 'Glbnnod', 'ptrGNODl',
'ptrGNODh', 'qrDmpKy', 'MSUPkey', 'PSDkey',
'cycMSUPkey', 'XfemCrkPropTech']
class ResultFile(AnsysBinary):
"""Reads a binary ANSYS result file.
Parameters
----------
filename : str, optional
Filename of the ANSYS binary result file.
ignore_cyclic : bool, optional
Ignores any cyclic properties.
read_geometry : bool, optional
Debug parameter.
Examples
--------
>>> import pyansys
>>> rst = pyansys.read_binary('file.rst')
"""
def __init__(self, filename, ignore_cyclic=False, read_geometry=True):
"""Loads basic result information from result file and
initializes result object.
"""
self.filename = filename
self.resultheader = self._read_result_header()
# Get the total number of results and log it
self.nsets = len(self.resultheader['rpointers'])
LOG.debug('There are %d result(s) in this file', self.nsets)
# Get indices to resort nodal and element results
self.sidx = np.argsort(self.resultheader['neqv'])
self.sidx_elem = np.argsort(self.resultheader['eeqv'])
# Store node numbering in ANSYS
self.nnum = self.resultheader['neqv'][self.sidx]
self.enum = self.resultheader['eeqv'][self.sidx_elem]
self.header = parse_header(self.read_record(103), RESULT_HEADER_KEYS)
self.geometry_header = {}  # populated by store_geometry
# store geometry for later retrieval
if read_geometry:
self.store_geometry()
@property
def n_sector(self):
"""Number of sectors"""
return self.resultheader['nSector']
def _read_result_header(self):
"""Returns pointers used to access results from an ANSYS result file.
Reads from ``self.filename``; takes no parameters.
Returns
-------
resultheader : dict
Result header dictionary.
"""
# consider moving this to the main class
standard_header = read_standard_header(self.filename)
# Read .RST FILE HEADER
header = parse_header(self.read_record(103), RESULT_HEADER_KEYS)
resultheader = merge_two_dicts(header, standard_header)
# Read nodal equivalence table
resultheader['neqv'] = self.read_record(resultheader['ptrNOD'])
# Read element equivalence table
resultheader['eeqv'] = self.read_record(resultheader['ptrELM'])
# Read table of pointers to locations of results
nsets = resultheader['nsets']
# Data sets index table. This record contains the record pointers
# for the beginning of each data set. The first resmax records are
# the first 32 bits of the index; the second resmax records are
# the second 32 bits.
record = self.read_record(resultheader['ptrDSI'])
raw0 = record[:resultheader['resmax']].tobytes()
raw1 = record[resultheader['resmax']:].tobytes()
# this combines the two ints, not that efficient
subraw0 = [raw0[i*4:(i+1)*4] for i in range(nsets)]
subraw1 = [raw1[i*4:(i+1)*4] for i in range(nsets)]
longraw = [subraw0[i] + subraw1[i] for i in range(nsets)]
longraw = b''.join(longraw)
rpointers = np.frombuffer(longraw, 'i8')
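# A vectorized sketch of the same 32-bit/32-bit combination, assuming a
# little-endian platform and int32 input records:
#   resmax = resultheader['resmax']
#   lo = record[:nsets].view(np.uint32).astype(np.int64)
#   hi = record[resmax:resmax + nsets].view(np.uint32).astype(np.int64)
#   rpointers = lo | (hi << 32)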
assert (rpointers >= 0).all(), 'Data set index table has negative pointers'
resultheader['rpointers'] = rpointers
# read in time values
record = self.read_record(resultheader['ptrTIM'])
resultheader['time_values'] = record[:resultheader['nsets']]
# load harmonic index of each result
if resultheader['ptrCYC']:
record = self.read_record(resultheader['ptrCYC'])
hindex = record[:resultheader['nsets']]
# ansys 15 doesn't track negative harmonic indices
if not np.any(hindex < -1):
# check if duplicate frequencies
tvalues = resultheader['time_values']
for i in range(tvalues.size - 1):
# adjust tolerance(?)
if np.isclose(tvalues[i], tvalues[i + 1]):
hindex[i + 1] *= -1
resultheader['hindex'] = hindex
# load step table with columns:
# [loadstep, substep, and cumulative]
record = self.read_record(resultheader['ptrLSP'])
resultheader['ls_table'] = record[:resultheader['nsets']*3].reshape(-1, 3)
return resultheader
def parse_coordinate_system(self):
"""Reads in coordinate system information from a binary result
file.
Uses the geometry header stored on this object to locate the
coordinate system records in the open result file.
Returns
-------
c_systems : list
List containing one entry for each defined coordinate
system. If no non-standard coordinate systems have been
defined, the list contains only a single ``None``. The first
coordinate system is assumed to be global cartesian.
Notes
-----
euler angles : [THXY, THYZ, THZX]
- First rotation about local Z (positive X toward Y).
- Second rotation about local X (positive Y toward Z).
- Third rotation about local Y (positive Z toward X).
PAR1
Used for elliptical, spheroidal, or toroidal systems. If KCS = 1
or 2, PAR1 is the ratio of the ellipse Y-axis radius to X-axis
radius (defaults to 1.0 (circle)). If KCS = 3, PAR1 is the major
radius of the torus.
PAR2
Used for spheroidal systems. If KCS = 2, PAR2 = ratio of ellipse
Z-axis radius to X-axis radius (defaults to 1.0 (circle)).
Coordinate system type:
- 0: Cartesian
- 1: Cylindrical (circular or elliptical)
- 2: Spherical (or spheroidal)
- 3: Toroidal
"""
# number of coordinate systems
maxcsy = self.geometry_header['maxcsy']
# load coordinate system index table
ptr_csy = self.geometry_header['ptrCSY']
if ptr_csy:
csy = self.read_record(ptr_csy)
# parse each coordinate system
# The items stored in each record:
# * Items 1-9 are the transformation matrix.
# * Items 10-12 are the coordinate system origin (XC,YC,ZC).
# * Items 13-14 are the coordinate system parameters (PAR1, PAR2).
# * Items 16-18 are the angles used to define the coordinate system.
# * Items 19-20 are theta and phi singularity keys.
# * Item 21 is the coordinate system type (0, 1, 2, or 3).
# * Item 22 is the coordinate system reference number.
c_systems = [None]
for i in range(maxcsy):
if not csy[i]:
c_system = None
else:
data = self.read_record(ptr_csy + csy[i])
c_system = {'transformation matrix': np.array(data[:9].reshape(-1, 3)),
'origin': np.array(data[9:12]),
'PAR1': data[12],
'PAR2': data[13],
'euler angles': data[15:18], # may not be euler
'theta singularity': data[18],
'phi singularity': data[19],
'type': int(data[20]),
'reference num': int(data[21])
}
c_systems.append(c_system)
return c_systems
def plot(self, node_components=None, sel_type_all=True, **kwargs):
"""Plot result geometry
Parameters
----------
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default True.
**kwargs : keyword arguments
Optional keyword arguments. See help(pyvista.plot)
Returns
-------
cpos : list
List of camera position, focal point, and view up.
Examples
--------
>>> import pyansys
>>> rst = pyansys.read_binary('file.rst')
>>> rst.plot()
"""
show_edges = kwargs.pop('show_edges', True)
if node_components:
grid, _ = self._extract_node_components(node_components, sel_type_all)
else:
grid = self.grid
return self._plot_point_scalars(None, grid=grid, show_edges=show_edges,
**kwargs)
def plot_nodal_solution(self, rnum, comp='norm',
show_displacement=False,
max_disp=0.1,
node_components=None, sel_type_all=True,
**kwargs):
"""Plots the nodal solution.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
comp : str, optional
Component to display. Options are ``'x'``, ``'y'``, ``'z'``,
or ``'norm'``. This corresponds to the x direction, y direction,
z direction, and the normalized result.
show_displacement : bool, optional
Deforms mesh according to the result.
max_disp : float, optional
Maximum displacement to which the deformed mesh is scaled.
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default True.
Returns
-------
cpos : list
Camera position from vtk render window.
Examples
--------
Plot the nodal solution result 0 of verification manual
example
>>> import pyansys
>>> result = pyansys.download_verification_result(33)
>>> result.plot_nodal_solution(0)
Plot with a white background and showing edges
>>> result.plot_nodal_solution(0, background='w', show_edges=True)
"""
# Load result from file
rnum = self.parse_step_substep(rnum)
nnum, result = self.nodal_solution(rnum)
label = 'Displacement'
# Process result
if comp == 'x':
scalars = result[:, 0]
stitle = 'X {:s}\n'.format(label)
elif comp == 'y':
scalars = result[:, 1]
stitle = 'Y {:s}\n'.format(label)
elif comp == 'z':
scalars = result[:, 2]
stitle = 'Z {:s}\n'.format(label)
else:
# Normalize displacement
scalars = result[:, :3]
scalars = (scalars*scalars).sum(1)**0.5
stitle = 'Normalized\n%s\n' % label
# sometimes there are fewer nodes in the result than in the geometry
npoints = self.grid.number_of_points
if nnum.size != npoints:
new_scalars = np.empty(npoints)
new_scalars[:] = np.nan
nnum_grid = self.grid.point_arrays['ansys_node_num']
mask = np.in1d(nnum_grid, nnum)
new_scalars[mask] = scalars
scalars = new_scalars
ind = None
if node_components:
grid, ind = self._extract_node_components(node_components,
sel_type_all)
scalars = scalars[ind]
else:
grid = self.grid
if show_displacement:
disp = self.nodal_solution(rnum)[1][:, :3]
if ind is not None:
disp = disp[ind]
# scale max displacement
disp /= (np.abs(disp).max()/max_disp)
new_points = disp + grid.points
grid = grid.copy()
grid.points = new_points
return self._plot_point_scalars(scalars, rnum=rnum, grid=grid,
# show_displacement=show_displacement,
# displacement_factor=displacement_factor,
node_components=node_components,
sel_type_all=sel_type_all,
**kwargs)
@property
def node_components(self):
""" dictionary of ansys node components """
ansyscomp = {}
for key in self.grid.point_arrays:
data = self.grid.point_arrays[key]
if data.dtype == 'uint8' or data.dtype == 'bool':
ansyscomp[key] = data.view(np.bool)
return ansyscomp
def _extract_node_components(self, node_components,
sel_type_all=True, grid=None):
""" Returns the part of the grid matching node components """
if grid is None:
grid = self.grid
if not self.geometry['components']: # pragma: no cover
raise Exception('Missing component information.\n' +
'Either no components have been stored, or ' +
'the version of this result file is <18.2')
if isinstance(node_components, str):
node_components = [node_components]
mask = np.zeros(grid.n_points, np.bool)
for component in node_components:
component = component.upper()
if component not in grid.point_arrays:
raise Exception('Result file does not contain node ' +
'component "%s"' % component)
mask += grid.point_arrays[component].view(np.bool)
# mask = np.logical_not(mask)
# need to extract the mesh
cells = grid.cells
offset = grid.offset
if sel_type_all:
cell_mask = cells_with_all_nodes(offset, cells, grid.celltypes,
mask.view(np.uint8))
else:
cell_mask = cells_with_any_nodes(offset, cells, grid.celltypes,
mask.view(np.uint8))
if not cell_mask.any():
raise RuntimeError('Empty component')
reduced_grid = grid.extract_cells(cell_mask)
if not reduced_grid.n_cells:
raise Exception('Empty mesh due to component selection\n' +
'Try "sel_type_all=False"')
ind = reduced_grid.point_arrays['vtkOriginalPointIds']
if not ind.any():
raise RuntimeError('Invalid component')
return reduced_grid, ind
@property
def time_values(self):
return self.resultheader['time_values']
def animate_nodal_solution(self, rnum, comp='norm',
node_components=None,
sel_type_all=True, add_text=True,
max_disp=0.1, nangles=100,
movie_filename=None, **kwargs):
"""Animate nodal solution. Assumes nodal solution is a
displacement array from a modal solution.
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
comp : str, optional
Scalar component to display. Options are 'x', 'y', 'z',
and 'norm', and None.
max_disp : float, optional
Maximum displacement in the units of the model. Default
0.1
nangles : int, optional
Number of "frames" between each full cycle.
movie_filename : str, optional
Filename of the movie to open. Filename should end in mp4,
but other filetypes may be supported. See ``imageio.get_writer``.
A single loop of the mode will be recorded.
kwargs : keyword arguments, optional
See help(pyvista.Plot) for additional keyword arguments.
"""
scalars = None
if comp:
_, disp = self.nodal_solution(rnum)
disp = disp[:, :3]
if comp == 'x':
axis = 0
elif comp == 'y':
axis = 1
elif comp == 'z':
axis = 2
else:
axis = None
if axis is not None:
scalars = disp[:, axis]
else:
scalars = (disp*disp).sum(1)**0.5
if node_components:
grid, ind = self._extract_node_components(node_components, sel_type_all)
if comp:
scalars = scalars[ind]
else:
grid = self.grid
return self._plot_point_scalars(scalars, rnum=rnum, grid=grid,
add_text=add_text,
animate=True,
node_components=node_components,
sel_type_all=sel_type_all,
nangles=nangles,
movie_filename=movie_filename,
max_disp=max_disp, **kwargs)
def nodal_time_history(self, solution_type='NSL', in_nodal_coord_sys=False):
"""Returns the DOF solution for each node in the global
cartesian coordinate system or nodal coordinate system.
Parameters
----------
solution_type : str, optional
Specify whether nodal displacements ('NSL'), nodal
velocities ('VEL') or nodal accelerations ('ACC') will be
read.
in_nodal_coord_sys : bool, optional
When True, returns results in the nodal coordinate system.
Default False.
Returns
-------
nnum : int np.ndarray
Node numbers associated with the results.
result : float np.ndarray
Result is ``self.nsets x nnod x Sumdof``, or number of
time steps by number of nodes by degrees of freedom.
"""
if solution_type not in ('NSL', 'VEL', 'ACC'):
raise ValueError("Argument 'solution type' must be either 'NSL', 'VEL', or 'ACC'")
# Get info from result header
rpointers = self.resultheader['rpointers']
nsets = self.nsets
# Read a result
# with open(self.filename, 'rb') as f:
# get first solution header and assume, following solution
# headers are equal
record = self.read_record(rpointers[0])
solution_header = parse_header(record, SOLUTION_DATA_HEADER_KEYS)
mask = solution_header['mask']
#PDBN = bool(mask & 0b1<<10)
pdnsl = bool(solution_header['AvailData'] & 0b1<<27)
PDVEL = bool(mask & 0b1<<27)
PDACC = bool(mask & 0b1<<28)
if solution_type == 'NSL' and not pdnsl:
raise Exception("Result file does not contain nodal displacements.")
if solution_type == 'VEL' and not PDVEL:
raise Exception("Result file does not contain nodal velocities.")
if solution_type == 'ACC' and not PDACC:
raise Exception("Result file does not contain nodal accelerations.")
nnod = solution_header['nnod']
numdof = solution_header['numdof']
nfldof = solution_header['nfldof']
sumdof = numdof + nfldof
#numvdof = solution_header['numvdof'] # does not seem to be set in transient analysis
#if not numvdof: numvodf = sumdof
#numadof = solution_header['numadof'] # does not seem to be set in transient analysis
#if not numadof: numadof = sumdof
# iterate over all loadsteps
results = np.zeros((nsets, nnod, sumdof))
for rnum in range(self.nsets):
# Seek to the result table to get the pointer to the DOF
# results of this result table
rptr = self.read_record(rpointers[rnum])
if solution_type == 'NSL': # Nodal Displacements
ptrSL = rptr[10] # item 12
elif solution_type == 'VEL': # Nodal Velocities
# from items 131, 132
ptrSL = two_ints_to_long(rptr[130], rptr[131])
elif solution_type == 'ACC': # Nodal Accelerations
# from items 133, 134
ptrSL = two_ints_to_long(rptr[132], rptr[133])
record, sz = self.read_record(rpointers[rnum] + ptrSL,
return_bufsize=True)
# nitems = record.size
result = record.reshape((-1, sumdof))
# PDBN should be set if only a subset of nodes was output
# PDBN is set only when solution type: nodal solution/displacement
# PDBN is not set when solution type: acceleration, velocities
if record.size != sumdof*nnod:
# read the next record to get the internal indexing reordering
nodlist = self.read_record(rpointers[rnum] + ptrSL + sz)
# convert to zero index
sidx = nodlist - 1
else:
# Reorder based on sorted indexing
sidx = self.sidx
results[rnum, sidx, :] = result
if not in_nodal_coord_sys:
# ansys writes the results in the nodal coordinate system.
# Convert this to the global coordinate system (in degrees)
euler_angles = self.geometry['nodes'][self.insolution, 3:].T
theta_xy, theta_yz, theta_zx = euler_angles
for rnum in range(nsets):
result = results[rnum, :, :]
if np.any(theta_xy):
pv.common.axis_rotation(result, theta_xy, inplace=True, axis='z')
if np.any(theta_yz):
pv.common.axis_rotation(result, theta_yz, inplace=True, axis='x')
if np.any(theta_zx):
pv.common.axis_rotation(result, theta_zx, inplace=True, axis='y')
# check for invalid values
# it seems MAPDL writes invalid values as 2**100
results[results == 2**100] = 0
# also include nodes in output
return self.nnum, results
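# Usage sketch for nodal_time_history (hypothetical transient result file):
#   rst = pyansys.read_binary('transient.rst')
#   nnum, hist = rst.nodal_time_history('NSL')
#   # hist[k, i] is the DOF solution of node nnum[i] at result set k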
def nodal_solution(self, rnum, in_nodal_coord_sys=False):
"""Returns the DOF solution for each node in the global
cartesian coordinate system or nodal coordinate system.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
in_nodal_coord_sys : bool, optional
When True, returns results in the nodal coordinate system.
Default False.
Returns
-------
nnum : int np.ndarray
Node numbers associated with the results.
result : float np.ndarray
Result is (``nnod`` x ``sumdof``), or number of nodes by degrees
of freedom which includes ``numdof`` and ``nfldof``.
Examples
--------
>>> import pyansys
>>> rst = pyansys.read_binary('file.rst')
>>> nnum, data = rst.nodal_solution(0)
Notes
-----
Some solution results may not include all node numbers. This
is reflected in the ``result`` and ``nnum`` arrays.
"""
# convert to cumulative index
rnum = self.parse_step_substep(rnum)
# result pointer
ptr_rst = self.resultheader['rpointers'][rnum]
result_solution_header = parse_header(self.read_record(ptr_rst),
SOLUTION_DATA_HEADER_KEYS)
nnod = result_solution_header['nnod']
numdof = result_solution_header['numdof']
nfldof = result_solution_header['nfldof']
sumdof = numdof + nfldof
ptr_nsl = result_solution_header['ptrNSL']
# Read the nodal solution
result, bufsz = self.read_record(ptr_nsl + ptr_rst, True)
result = result.reshape(-1, sumdof)
# no idea why the result written is twice as long...
result = result[:result.shape[0]//2]
# # it's possible that not all results are written
if result.shape[0] != nnod:
# read second buffer containing the node indices of the
# results and convert from fortran to zero indexing
sidx = self.read_record(ptr_nsl + ptr_rst + bufsz) - 1
unsort_nnum = self.resultheader['neqv'][sidx]
# now, sort using the new sorted node numbers indices
new_sidx = np.argsort(unsort_nnum)
nnum = unsort_nnum[new_sidx]
result = result[new_sidx]
# these are the associated nodal locations
# sidx_inv = np.argsort(self.sidx)
# nodes = self.geometry['nodes'][sidx_inv][sidx][:, :3]
else:
nnum = self.nnum
result = result.take(self.sidx, 0)
if not in_nodal_coord_sys:
# ansys writes the results in the nodal coordinate system.
# Convert this to the global coordinate system (in degrees)
euler_angles = self.geometry['nodes'][self.insolution, 3:].T
theta_xy, theta_yz, theta_zx = euler_angles
if np.any(theta_xy):
pv.common.axis_rotation(result, theta_xy, inplace=True, axis='z')
if np.any(theta_yz):
pv.common.axis_rotation(result, theta_yz, inplace=True, axis='x')
if np.any(theta_zx):
pv.common.axis_rotation(result, theta_zx, inplace=True, axis='y')
# check for invalid values
# it seems MAPDL writes invalid values as 2**100
result[result == 2**100] = 0
# also include nodes in output
return nnum, result
def _read_components(self):
"""Read components from an ANSYS result file
Returns
-------
components : dict
Dictionary of components
"""
components = {}
ncomp = self.geometry_header['maxcomp']
if not ncomp:
return components
# Read through components
file_ptr = self.geometry_header['ptrCOMP']
for _ in range(ncomp):
table, sz = self.read_record(file_ptr, True)
file_ptr += sz # increment file_pointer
# strings are up to 32 characters
raw = table[1:9].tobytes().split(b'\x00')[0]
name = raw.decode('utf')
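# component names are stored as reversed 4-character chunks, so
# un-reverse each chunk to recover the readable name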
name = name[:4][::-1] + name[4:8][::-1] + name[8:12][::-1] +\
name[12:16][::-1] + name[16:20][::-1] + name[20:24][::-1] +\
name[24:28][::-1] + name[28:32][::-1]
name = name.strip()
data = table[9:]
if data.any():
components[name] = _reader.component_interperter(data)
return components
def store_geometry(self):
""" Stores the geometry from the result file """
# read geometry header
table = self.read_record(self.resultheader['ptrGEO'])
geometry_header = parse_header(table, GEOMETRY_HEADER_KEYS)
self.geometry_header = geometry_header
# Node information
nnod = geometry_header['nnod']
nnum = np.empty(nnod, np.int32)
nloc = np.empty((nnod, 6), np.float)
_binary_reader.load_nodes(self.filename, geometry_header['ptrLOC'],
nnod, nloc, nnum)
# Element information
nelm = geometry_header['nelm']
maxety = geometry_header['maxety']
# pointer to the element type index table
e_type_table = self.read_record(geometry_header['ptrETY'])
# store information for each element type
# make these arrays large so you can reference a value via element
# type numbering
# number of nodes for this element type
nodelm = np.empty(10000, np.int32)
# number of nodes per element having nodal forces
nodfor = np.empty(10000, np.int32)
# number of nodes per element having nodal stresses
nodstr = np.empty(10000, np.int32)
# etype_id = np.empty(maxety, np.int32)
ekey = []
keyopts = np.zeros((10000, 11), np.int16)
for i in range(maxety):
if not e_type_table[i]:
continue
ptr = geometry_header['ptrETY'] + e_type_table[i]
einfo = self.read_record(ptr)
etype_ref = einfo[0]
ekey.append(einfo[:2])
# Items 3-13 - element type option keys (keyopts)
keyopts[etype_ref] = einfo[2:13]
# Item 61 - number of nodes for this element type (nodelm)
nodelm[etype_ref] = einfo[60]
# Item 63 - number of nodes per element having nodal
# forces, etc. (nodfor)
nodfor[etype_ref] = einfo[62]
# Item 94 - number of nodes per element having nodal
# stresses, etc. (nodstr). This number is the number
# of corner nodes for higher-ordered elements.
nodstr[etype_ref] = einfo[93]
# with KEYOPT(8)=0, the record contains stresses at
# each corner node (first at the bottom shell surface,
# then the top surface)
#
# Only valid for SHELL181 or SHELL281 elements.
if einfo[1] == 181 or einfo[1] == 281:
if keyopts[etype_ref, 7] == 0:
nodstr[etype_ref] *= 2
# store element table data
self.element_table = {'nodelm': nodelm,
'nodfor': nodfor,
'nodstr': nodstr,
'keyopts': keyopts}
# the element description table
# must view this record as int64, even though ansys reads
# it in as a 32 bit int
ptr_eid = geometry_header['ptrEID']
e_disp_table = self.read_record(ptr_eid).view(np.int64)
# get pointer to start of element table and adjust element pointers
ptr_elem = geometry_header['ptrEID'] + e_disp_table[0]
e_disp_table -= e_disp_table[0]
# read in coordinate systems
c_systems = self.parse_coordinate_system()
# The following is stored for each element
# mat - material reference number
# type - element type number
# real - real constant reference number
# secnum - section number
# esys - element coordinate system
# death - death flag (1 live, 0 dead)
# solidm - solid model reference
# shape - coded shape key
# elnum - element number
# baseeid - base element number
# NODES - node numbers defining the element
# allocate memory for this (a maximum of 20 nodes per element)
etype = np.empty(nelm, np.int32)
elem = np.empty((nelm, 20), np.int32)
elem[:] = -1
mtype = np.empty(nelm, np.int32)
rcon = np.empty(nelm, np.int32)
esys = np.empty(nelm, np.int32)
# load elements
_binary_reader.load_elements(self.filename, ptr_elem, nelm,
e_disp_table, elem, etype, mtype, rcon, esys)
enum = self.resultheader['eeqv']
element_type = np.zeros_like(etype)
for key, typekey in ekey:
element_type[etype == key] = typekey
components = self._read_components()
# store geometry dictionary
self.geometry = {'nnum': nnum,
'nodes': nloc,
'etype': etype,
'elem': elem,
'enum': enum,
'ekey': np.asarray(ekey, ctypes.c_int),
'esys': esys,
'e_rcon': rcon,
'mtype': mtype,
'Element Type': element_type,
'coord systems': c_systems,
'components': components}
# store the reference array
# allow quadratic elements; disallow null (undefined) element types
parsed = _parser.parse(self.geometry, False, valid_types, True,
keyopts)
cells = parsed['cells']
offset = parsed['offset']
cell_type = parsed['cell_type']
self.numref = parsed['numref']
# catch -1
cells[cells == -1] = 0
# identify nodes that are actually in the solution
self.insolution = np.in1d(self.geometry['nnum'],
self.resultheader['neqv'])
# Create vtk object
nodes = nloc[:, :3]
self.quadgrid = pv.UnstructuredGrid(offset, cells, cell_type, nodes)
self.quadgrid.cell_arrays['ansys_elem_num'] = enum
self.quadgrid.point_arrays['ansys_node_num'] = nnum
self.quadgrid.cell_arrays['Element Type'] = element_type
# add node components
for component_name in components:
mask = np.in1d(nnum, components[component_name])
self.quadgrid.point_arrays[component_name] = mask
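# keep both representations: ``quadgrid`` preserves midside nodes,
# while the linear copy below is what gets used for plotting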
self.grid = self.quadgrid.linear_copy()
def solution_info(self, rnum):
"""Return an informative dictionary of solution data for a
result.
Returns
-------
header : dict
Double precision solution header data.
Notes
-----
The keys of the solution header are described below:
timfrq - Time value (or frequency value, for a modal or
harmonic analysis)
lfacto - the "old" load factor (used in ramping a load
between old and new values)
lfactn - the "new" load factor
cptime - elapsed cpu time (in seconds)
tref - the reference temperature
tunif - the uniform temperature
tbulk - Bulk temp for FLOTRAN film coefs.
VolBase - Initial total volume for VOF
tstep - Time Step size for FLOTRAN analysis
0.0 - position not used
accel - linear acceleration terms
omega - angular velocity (first 3 terms) and angular acceleration
(second 3 terms)
omegacg - angular velocity (first 3 terms) and angular
acceleration (second 3 terms) these velocity/acceleration
terms are computed about the center of gravity
cgcent - (x,y,z) location of center of gravity
fatjack - FATJACK ocean wave data (wave height and period)
dval1 - if pmeth=0: FATJACK ocean wave direction
if pmeth=1: p-method convergence values
pCnvVal - p-method convergence values
"""
# Check if result is available
if rnum > self.nsets - 1:
raise Exception('There are only %d results in the result file.'
% self.nsets)
# skip pointers table
ptr = self.resultheader['rpointers'][rnum]
_, sz = self.read_record(ptr, True)
table = self.read_record(ptr + sz)
return parse_header(table, SOLUTION_HEADER_KEYS_DP)
def _element_solution_header(self, rnum):
""" Get element solution header information """
# Get the header information from the header dictionary
rpointers = self.resultheader['rpointers']
nodstr = self.element_table['nodstr']
etype = self.geometry['etype']
# read result solution header
record = self.read_record(rpointers[rnum])
solution_header = parse_header(record, SOLUTION_DATA_HEADER_KEYS)
# key to extrapolate integration
# = 0 - move
# = 1 - extrapolate unless active
# non-linear
# = 2 - extrapolate always
if solution_header['rxtrap'] == 0:
warnings.warn('Strains and stresses are being evaluated at ' +
'gauss points and not extrapolated')
# 64-bit pointer to element solution
if not solution_header['ptrESL']:
raise Exception('No element solution in result set %d\n'
% (rnum + 1) + 'Try running with "MXPAND,,,,YES"')
# Seek to element result header
element_rst_ptr = rpointers[rnum] + solution_header['ptrESL']
ele_ind_table = self.read_record(element_rst_ptr).view(np.int64)
ele_ind_table += element_rst_ptr
# boundary conditions
# bc = self.read_record(rpointers[rnum] + solution_header['ptrBC'])
return ele_ind_table, nodstr, etype
@property
def version(self):
""" The version of ANSYS used to generate this result file """
return float(self.resultheader['verstring'])
def element_stress(self, rnum, principal=False, in_element_coord_sys=False):
"""Retrives the element component stresses.
Equivalent ANSYS command: PRESOL, S
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
principal : bool, optional
Returns principal stresses instead of component stresses.
Default False.
in_element_coord_sys : bool, optional
Returns the results in the element coordinate system.
Default False and will return the results in the global
coordinate system.
Returns
-------
element_stress : list
Stresses at each node of each element in the order Sx, Sy,
Sz, Sxy, Syz, Sxz, or SIGMA1, SIGMA2, SIGMA3, SINT, SEQV when
principal is True.
enum : np.ndarray
ANSYS element numbers corresponding to each element.
enode : list
Node numbers corresponding to each element's stress
results. One list entry for each element.
Notes
-----
Shell stresses for element 181 are returned for top and bottom
layers. Results are ordered such that the top layer and then
the bottom layer is reported.
"""
rnum = self.parse_step_substep(rnum)
ele_ind_table, nodstr, etype = self._element_solution_header(rnum)
# certain element types do not output stress
elemtype = self.geometry['Element Type'].astype(np.int32)
etype = etype.astype(ctypes.c_int64)
# load in raw results
nnode = nodstr[etype]
nelemnode = nnode.sum()
# bitmask (might use this at some point)
# bitmask = bin(int(hex(self.resultheader['rstsprs']), base=16)).lstrip('0b')
# description maybe in resucm.inc
if self.version >= 14.5:
if self.resultheader['rstsprs'] != 0:
nitem = 6
else:
nitem = 11
# add extra elements to data array. Sometimes there are
# more items than listed in the result header (or there's a mistake here)
ele_data_arr = np.empty((nelemnode + 50, nitem), np.float32)
ele_data_arr[:] = np.nan
_binary_reader.read_element_stress(self.filename,
ele_ind_table,
nodstr.astype(np.int64),
etype, ele_data_arr,
nitem, elemtype,
as_global=not in_element_coord_sys)
if nitem != 6:
ele_data_arr = ele_data_arr[:, :6]
else:
raise NotImplementedError('Not implemented for ANSYS older than v14.5')
# trim off extra data
ele_data_arr = ele_data_arr[:nelemnode]
if principal:
ele_data_arr, isnan = _binary_reader.compute_principal_stress(ele_data_arr)
ele_data_arr[isnan] = np.nan
splitind = np.cumsum(nnode)
element_stress = np.split(ele_data_arr, splitind[:-1])
# reorder list using sorted indices
# enum = self.grid.cell_arrays['ansys_elem_num']
enum = self.geometry['enum']
sidx = np.argsort(enum)
element_stress = [element_stress[i] for i in sidx]
elem = self.geometry['elem']
enode = []
for i in sidx:
enode.append(elem[i, :nnode[i]])
# Get element numbers
elemnum = self.geometry['enum'][self.sidx_elem]
return element_stress, elemnum, enode
def element_solution_data(self, rnum, datatype, sort=True):
"""Retrives element solution data. Similar to ETABLE.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a list containing
(step, substep) of the requested result.
datatype : str
Element data type to retrieve.
- EMS: misc. data
- ENF: nodal forces
- ENS: nodal stresses
- ENG: volume and energies
- EGR: nodal gradients
- EEL: elastic strains
- EPL: plastic strains
- ECR: creep strains
- ETH: thermal strains
- EUL: euler angles
- EFX: nodal fluxes
- ELF: local forces
- EMN: misc. non-sum values
- ECD: element current densities
- ENL: nodal nonlinear data
- EHC: calculated heat generations
- EPT: element temperatures
- ESF: element surface stresses
- EDI: diffusion strains
- ETB: ETABLE items
- ECT: contact data
- EXY: integration point locations
- EBA: back stresses
- ESV: state variables
- MNL: material nonlinear record
Returns
-------
enum : np.ndarray
Element numbers.
element_data : list
List with one data item for each element.
Notes
-----
See ANSYS element documentation for available items for each element type.
ENG - Element volume and energies.
volume: Element volume
senergy: Element energy associated with the stiffness matrix
aenergy: Artificial hourglass energy
kenergy: Kinetic energy
coenergy: Co-energy (magnetics)
incenergy: Position not used
position not used
thenergy: Thermal dissipation energy (see ThermMat, shell131/132 only)
position not used
position not used
"""
table_ptr = datatype.upper()
if table_ptr not in ELEMENT_INDEX_TABLE_KEYS:
err_str = 'Data type %s is invalid\n' % str(datatype)
err_str += '\nAvailable types:\n'
for key in ELEMENT_INDEX_TABLE_KEYS:
err_str += '\t%s: %s\n' % (key, ELEMENT_INDEX_TABLE_INFO[key])
raise ValueError(err_str)
# location of data pointer within each element result table
table_index = ELEMENT_INDEX_TABLE_KEYS.index(table_ptr)
rnum = self.parse_step_substep(rnum)
ele_ind_table, _, _ = self._element_solution_header(rnum)
# read element data
element_data = []
for ind in ele_ind_table:
# read element table index pointer to data
ptr = self.read_record(ind)[table_index]
if ptr > 0:
record = self.read_record(ind + ptr)
element_data.append(record)
else:
element_data.append(None)
enum = self.grid.cell_arrays['ansys_elem_num']
if sort:
sidx = np.argsort(enum)
enum = enum[sidx]
element_data = [element_data[i] for i in sidx]
return enum, element_data
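# Usage sketch for element_solution_data (hypothetical file; the 'ENG'
# record layout is taken from the docstring above):
#   enum, edata = rst.element_solution_data(0, 'ENG')
#   # edata[i][0] is the volume of element enum[i]; entries are None for
#   # elements without an ENG record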
def principal_nodal_stress(self, rnum):
"""Computes the principal component stresses for each node in
the solution.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
Returns
-------
nodenum : numpy.ndarray
Node numbers of the result.
pstress : numpy.ndarray
Principal stresses, stress intensity, and equivalent stress.
[sigma1, sigma2, sigma3, sint, seqv]
Notes
-----
ANSYS equivalent of:
PRNSOL, S, PRIN
which returns:
S1, S2, S3 principal stresses, SINT stress intensity, and SEQV
equivalent stress.
"""
# get component stress
nodenum, stress = self.nodal_stress(rnum)
# compute principal stress
if stress.dtype != np.float32:
stress = stress.astype(np.float32)
pstress, isnan = _binary_reader.compute_principal_stress(stress)
pstress[isnan] = np.nan
return nodenum, pstress
def plot_principal_nodal_stress(self, rnum, stype=None, node_components=None,
sel_type_all=True, **kwargs):
"""Plot the principal stress at each node in the solution.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
stype : string
Stress type to plot. '1', '2', '3' are the principal stresses,
'INT' the stress intensity, and 'EQV' the equivalent stress.
Stress type must be a string from the following list:
['1', '2', '3', 'INT', 'EQV']
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default True.
kwargs : keyword arguments
Additional keyword arguments. See help(pyvista.plot)
Returns
-------
cpos : list
VTK camera position.
stress : np.ndarray
Array used to plot stress.
"""
if stype is None:
raise Exception("Stress type must be a string from the following list:\n" +
"['1', '2', '3', 'INT', 'EQV']")
rnum = self.parse_step_substep(rnum)
stress = self.principle_stress_for_plotting(rnum, stype)
if node_components:
grid, ind = self._extract_node_components(node_components, sel_type_all)
stress = stress[ind]
else:
grid = self.grid
# Generate plot
return self._plot_point_scalars(stress, rnum=rnum, grid=grid, **kwargs)
def cs_4x4(self, cs_cord, as_vtk_matrix=False):
""" return a 4x4 transformation array for a given coordinate system """
# assemble 4 x 4 matrix
csys = self.geometry['coord systems'][cs_cord]
trans = np.hstack((csys['transformation matrix'],
csys['origin'].reshape(-1, 1)))
matrix = trans_to_matrix(trans)
if as_vtk_matrix:
return matrix
else:
return pv.trans_from_matrix(matrix)
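# Example sketch for cs_4x4 (hypothetical coordinate system number 12):
#   trans = rst.cs_4x4(12)                        # 4 x 4 numpy array
#   vtk_mat = rst.cs_4x4(12, as_vtk_matrix=True)  # vtk.vtkMatrix4x4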
def _plot_point_scalars(self, scalars, rnum=None, grid=None,
show_displacement=False, displacement_factor=1,
add_text=True, animate=False, nangles=100,
movie_filename=None, max_disp=0.1, **kwargs):
"""Plot point scalars on active mesh.
Parameters
----------
scalars : np.ndarray
Node scalars to plot.
rnum : int, optional
Cumulative result number. Used for adding informative
text.
grid : pyvista.PolyData or pyvista.UnstructuredGrid, optional
Uses self.grid by default. When specified, uses this grid
instead.
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
add_text : bool, optional
Adds information about the result when rnum is given.
kwargs : keyword arguments
Additional keyword arguments. See help(pyvista.plot)
Returns
-------
cpos : list
Camera position.
"""
if grid is None:
grid = self.grid
disp = None
if show_displacement and not animate:
disp = self.nodal_solution(rnum)[1][:, :3]*displacement_factor
new_points = disp + grid.points
grid = grid.copy()
grid.points = new_points
elif animate:
disp = self.nodal_solution(rnum)[1][:, :3]
# extract mesh surface
mapped_indices = None
if 'vtkOriginalPointIds' in grid.point_arrays:
mapped_indices = grid.point_arrays['vtkOriginalPointIds']
mesh = grid.extract_surface()
ind = mesh.point_arrays['vtkOriginalPointIds']
if disp is not None:
if mapped_indices is not None:
disp = disp[mapped_indices][ind]
else:
disp = disp[ind]
if animate: # scale for max displacement
disp /= (np.abs(disp).max()/max_disp)
if scalars is not None:
if scalars.ndim == 2:
scalars = scalars[:, ind]
else:
scalars = scalars[ind]
rng = kwargs.pop('rng', [scalars.min(), scalars.max()])
else:
rng = kwargs.pop('rng', None)
cmap = kwargs.pop('cmap', 'jet')
smooth_shading = kwargs.pop('smooth_shading', True)
window_size = kwargs.pop('window_size', [1024, 768])
full_screen = kwargs.pop('full_screen', False)
notebook = kwargs.pop('notebook', False)
off_screen = kwargs.pop('off_screen', None)
cpos = kwargs.pop('cpos', None)
screenshot = kwargs.pop('screenshot', None)
color = kwargs.pop('color', 'w')
interpolate_before_map = kwargs.pop('interpolate_before_map', True)
interactive = kwargs.pop('interactive', True)
stitle = kwargs.pop('stitle', None)
# coordinate transformation for cyclic replication
cs_cord = self.resultheader['csCord']
if cs_cord > 1:
matrix = self.cs_4x4(cs_cord, as_vtk_matrix=True)
i_matrix = self.cs_4x4(cs_cord, as_vtk_matrix=True)
i_matrix.Invert()
else:
matrix = vtk.vtkMatrix4x4()
i_matrix = vtk.vtkMatrix4x4()
plotter = pv.Plotter(off_screen=off_screen, notebook=notebook)
# set axes
if kwargs.pop('show_axes', True):
plotter.add_axes()
# set background
plotter.background_color = kwargs.pop('background', None)
n_sector = 1
if np.any(scalars):
if self.n_sector > 1:
if scalars.ndim != 2:
n_sector = 1
scalars = [scalars]
elif scalars.ndim == 1:
scalars = [scalars]
else:
n_sector = self.n_sector
elif scalars.ndim == 1:
scalars = [scalars]
else:
if self.n_sector > 1:
if kwargs.pop('full_rotor', True):
n_sector = self.n_sector
scalars = [None]*n_sector
else:
scalars = [None]
else:
scalars = [None]
rang = 360.0 / self.n_sector
copied_meshes = []
# remove extra keyword args
kwargs.pop('node_components', None)
kwargs.pop('sel_type_all', None)
if kwargs.pop('overlay_wireframe', False):
plotter.add_mesh(self.grid,
color='w',
style='wireframe',
opacity=0.5,
**kwargs)
for i in range(n_sector):
copied_mesh = mesh.copy(False)
actor = plotter.add_mesh(copied_mesh,
color=color,
scalars=scalars[i],
rng=rng,
smooth_shading=smooth_shading,
interpolate_before_map=interpolate_before_map,
stitle=stitle,
cmap=cmap,
**kwargs)
# transform to standard position, rotate about Z axis,
# transform back
vtk_transform = vtk.vtkTransform()
vtk_transform.RotateZ(rang*i)
vtk_transform.Update()
rot_matrix = vtk_transform.GetMatrix()
if cs_cord > 1:
temp_matrix = vtk.vtkMatrix4x4()
rot_matrix.Multiply4x4(i_matrix, rot_matrix, temp_matrix)
rot_matrix.Multiply4x4(temp_matrix, matrix, rot_matrix)
vtk_transform.SetMatrix(rot_matrix)
actor.SetUserTransform(vtk_transform)
# plotter.add_scalar_bar()
# NAN/missing data are white
# plotter.renderers[0].SetUseDepthPeeling(1) # <-- for transparency issues
plotter.mapper.GetLookupTable().SetNanColor(1, 1, 1, 1)
if cpos:
plotter.camera_position = cpos
if movie_filename:
plotter.open_movie(movie_filename)
# add table
if add_text and rnum is not None:
result_text = self.text_result_table(rnum)
actor = plotter.add_text(result_text, font_size=20)
if animate:
orig_pts = copied_mesh.points.copy()
plotter.show(interactive=False, auto_close=False,
interactive_update=not off_screen)
self.animating = True
def q_callback():
"""exit when user wants to leave"""
self.animating = False
plotter.add_key_event("q", q_callback)
first_loop = True
while self.animating:
for angle in np.linspace(0, np.pi*2, nangles):
mag_adj = np.sin(angle)
if scalars[0] is not None:
copied_mesh['Data'] = scalars[0]*mag_adj
copied_mesh.points = orig_pts + disp*mag_adj
if add_text:
# 2 maps to vtk.vtkCornerAnnotation.UpperLeft
plotter.textActor.SetText(2, '%s\nPhase %.1f Degrees' %
(result_text, (angle*180/np.pi)))
plotter.update(30, force_redraw=True)
if not self.animating:
break
if movie_filename and first_loop:
plotter.write_frame()
first_loop = False
if off_screen or interactive is False:
break
plotter.close()
elif screenshot:
cpos = plotter.show(auto_close=False, interactive=interactive,
window_size=window_size,
full_screen=full_screen)
if screenshot is True:
img = plotter.screenshot()
else:
plotter.screenshot(screenshot)
plotter.close()
else:
cpos = plotter.show(interactive=interactive,
window_size=window_size,
full_screen=full_screen)
if screenshot is True:
return cpos, img
return cpos
def text_result_table(self, rnum):
""" Returns a text result table for plotting """
ls_table = self.resultheader['ls_table']
timevalue = self.time_values[rnum]
text = 'Cumulative Index: {:3d}\n'.format(ls_table[rnum, 2])
if self.resultheader['nSector'] > 1:
hindex = self.resultheader['hindex'][rnum]
text += 'Harmonic Index {:3d}\n'.format(hindex)
text += 'Loadstep: {:3d}\n'.format(ls_table[rnum, 0])
text += 'Substep: {:3d}\n'.format(ls_table[rnum, 1])
text += 'Time Value: {:10.4f}'.format(timevalue)
return text
def principle_stress_for_plotting(self, rnum, stype):
"""
returns stress used to plot
"""
stress_types = ['1', '2', '3', 'INT', 'EQV']
if stype.upper() not in stress_types:
raise Exception('Stress type not in \n' + str(stress_types))
sidx = stress_types.index(stype)
_, stress = self.principal_nodal_stress(rnum)
return stress[:, sidx]
def plot_nodal_stress(self, rnum, comp=None,
show_displacement=False,
displacement_factor=1,
node_components=None,
sel_type_all=True, **kwargs):
"""Plots the stresses at each node in the solution.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
comp : str, optional
Stress component to display. Available options:
- ``"X"``
- ``"Y"``
- ``"Z"``
- ``"XY"``
- ``"YZ"``
- ``"XZ"``
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default True.
kwargs : keyword arguments
Additional keyword arguments. See help(pyvista.plot)
Returns
-------
cpos : list
3 x 3 vtk camera position.
"""
available_comps = ['X', 'Y', 'Z', 'XY', 'YZ', 'XZ']
if comp is None:
raise ValueError('Missing "comp" parameter. Please select from the following:\n%s' % available_comps)
kwargs['stitle'] = '%s Component Nodal Stress' % comp
return self._plot_nodal_result(rnum, 'ENS', comp, available_comps, show_displacement,
displacement_factor, node_components,
sel_type_all, **kwargs)
def save_as_vtk(self, filename):
"""Appends all results to an unstructured grid and writes it to
disk.
The file extension will select the type of writer to use.
'.vtk' will use the legacy writer, while '.vtu' will select
the VTK XML writer.
Parameters
----------
filename : str
Filename of grid to be written. The file extension will
select the type of writer to use. '.vtk' will use the
legacy writer, while '.vtu' will select the VTK XML
writer.
Notes
-----
Binary files write much faster than ASCII, but binary files
written on one system may not be readable on other systems.
Binary can only be selected for the legacy writer.
"""
# Copy grid as to not write results to original object
grid = self.grid.copy()
for i in range(self.nsets):
# Nodal results
_, val = self.nodal_solution(i)
grid.point_arrays['nodal_solution{:03d}'.format(i)] = val
# Populate with nodal stress at edge nodes
# nodenum = self.grid.point_arrays['ansys_node_num']
_, stress = self.nodal_stress(i)
grid.point_arrays['nodal_stress{:03d}'.format(i)] = stress
grid.save(filename)
def write_tables(self, filename):
""" Write binary tables to ASCII. Assumes int32 """
with open(self.filename, 'rb') as rawresult, open(filename, 'w') as f:
while True:
try:
table = read_table(rawresult)
f.write('*** %d ***\n' % len(table))
for item in table:
f.write(str(item) + '\n')
f.write('\n\n')
except Exception:
break
def parse_step_substep(self, user_input):
""" Converts (step, substep) to a cumulative index """
if is_int(user_input):
# check if result exists
if user_input > self.nsets - 1:
raise Exception('Only %d result(s) in the result file.' % self.nsets)
return user_input
elif isinstance(user_input, (list, tuple)):
if len(user_input) != 2:
raise Exception('Input must contain (step, substep) using ' +
'1 based indexing (e.g. (1, 1)).')
ls_table = self.resultheader['ls_table']
step, substep = user_input
mask = np.logical_and(ls_table[:, 0] == step,
ls_table[:, 1] == substep)
if not np.any(mask):
raise Exception('Load step table does not contain ' +
'step %d and substep %d' % tuple(user_input))
index = mask.nonzero()[0]
assert index.size == 1, 'Multiple cumulative index matches'
return index[0]
else:
raise Exception('Input must be either an int or a list')
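# Usage sketch: with 1-based (step, substep) indexing, (1, 1) typically
# maps to cumulative index 0, so these address the same result set:
#   rst.parse_step_substep(0)
#   rst.parse_step_substep((1, 1))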
def __repr__(self):
rst_info = ['ANSYS MAPDL Result file object']
keys = ['title', 'subtitle', 'units']
for key in keys:
value = self.resultheader[key]
if value:
rst_info.append('{:<12s}: {:s}'.format(key.capitalize(), value))
value = self.resultheader['verstring']
rst_info.append('{:<12s}: {:s}'.format('Version', value))
value = str(self.resultheader['nSector'] > 1)
rst_info.append('{:<12s}: {:s}'.format('Cyclic', value))
value = self.resultheader['nsets']
rst_info.append('{:<12s}: {:d}'.format('Result Sets', value))
value = self.resultheader['nnod']
rst_info.append('{:<12s}: {:d}'.format('Nodes', value))
value = self.resultheader['nelm']
rst_info.append('{:<12s}: {:d}'.format('Elements', value))
return '\n'.join(rst_info)
def _nodal_result(self, rnum, result_type):
"""Generic load nodal result
Parameters
----------
rnum : int
Result number.
result_type : int
EMS: misc. data
ENF: nodal forces
ENS: nodal stresses
ENG: volume and energies
EGR: nodal gradients
EEL: elastic strains
EPL: plastic strains
ECR: creep strains
ETH: thermal strains
EUL: euler angles
EFX: nodal fluxes
ELF: local forces
EMN: misc. non-sum values
ECD: element current densities
ENL: nodal nonlinear data
EHC: calculated heat
EPT: element temperatures
ESF: element surface stresses
EDI: diffusion strains
ETB: ETABLE items(post1 only
ECT: contact data
EXY: integration point locations
EBA: back stresses
ESV: state variables
MNL: material nonlinear record
Returns
-------
nnum : np.ndarray
ANSYS node numbers
result : np.ndarray
Array of result data
"""
# element header
rnum = self.parse_step_substep(rnum)
ele_ind_table, nodstr, etype = self._element_solution_header(rnum)
result_type = result_type.upper()
if self.resultheader['rstsprs'] == 0 and result_type == 'ENS':
nitem = 11
elif result_type in ELEMENT_RESULT_NCOMP:
nitem = ELEMENT_RESULT_NCOMP[result_type]
else:
nitem = 1
result_index = ELEMENT_INDEX_TABLE_KEYS.index(result_type)
# Element types for nodal averaging
elemtype = self.geometry['Element Type'].astype(np.int32)
if self.version < 14.5: # values stored as double precision
tarr = np.empty(1, np.float64)
my_dtype = 1
else: # values stored as single precision
tarr = np.empty(1, np.float32)
my_dtype = 0
data, ncount = _binary_reader.read_nodal_values(self.filename,
self.grid.celltypes,
ele_ind_table,
self.grid.offset,
self.grid.cells,
nitem,
self.grid.number_of_points,
nodstr,
etype,
elemtype,
result_index,
tarr,
my_dtype)
if result_type == 'ENS' and nitem != 6:
data = data[:, :6]
nnum = self.grid.point_arrays['ansys_node_num']
if np.isnan(data).all():
raise ValueError('Result file contains no %s records for result %d' %
(ELEMENT_INDEX_TABLE_INFO[result_type.upper()], rnum))
# average across nodes
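# nodes untouched by any contributing element have ncount == 0; the
# division below then yields NaN (np.seterr at module import silences
# the divide/invalid warnings)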
result = data/ncount.reshape(-1, 1)
return nnum, result
def nodal_stress(self, rnum):
"""Retrieves the component stresses for each node in the
solution.
The order of the results corresponds to the sorted node
numbering.
Computes the nodal stress by averaging the stress for each
element at each node. Due to the discontinuities across
elements, stresses will vary based on the element they are
evaluated from.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
Returns
-------
nnum : numpy.ndarray
Node numbers of the result.
stress : numpy.ndarray
Stresses in the order X, Y, Z, XY, YZ, XZ averaged at each
corner node.
Examples
--------
>>> import pyansys
>>> rst = pyansys.read_binary('file.rst')
>>> nnum, stress = rst.nodal_stress(0)
Notes
-----
Nodes without a stress value will be NAN.
Equivalent ANSYS command: PRNSOL, S
"""
return self._nodal_result(rnum, 'ENS')
def nodal_temperature(self, rnum):
"""Retrieves the temperature for each node in the
solution.
The order of the results corresponds to the sorted node
numbering.
Equivalent MAPDL command: PRNSOL, TEMP
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
Returns
-------
nnum : numpy.ndarray
Node numbers of the result.
temperature : numpy.ndarray
Temperature at each node.
Examples
--------
>>> import pyansys
>>> rst = pyansys.read_binary('file.rst')
>>> nnum, stress = rst.nodal_temperature(0)
"""
nnum, temp = self._nodal_result(rnum, 'EPT')
temp = temp.ravel()
return nnum, temp
def plot_nodal_temperature(self, rnum, show_displacement=False,
displacement_factor=1, node_components=None,
sel_type_all=True, **kwargs):
"""Plot nodal temperature
Parameters
----------
rnum : int
Result number
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
node_components : list, optional
Accepts either a string or a list of strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default True.
**kwargs : keyword arguments
Optional keyword arguments. See help(pyvista.plot)
Examples
--------
Plot the nodal temperature of a sample file
>>> import pyansys
>>> result = pyansys.read_binary('file.rst')
>>> result.plot_nodal_temperature(0)
Plot while showing edges and disabling lighting
>>> result.plot_nodal_temperature(0, show_edges=True, lighting=False)
"""
_, scalars = self.nodal_temperature(rnum)
grid = self.grid
if node_components:
grid, ind = self._extract_node_components(node_components, sel_type_all)
scalars = scalars[ind]
return self._plot_point_scalars(scalars, grid=grid, rnum=rnum,
show_displacement=show_displacement,
displacement_factor=displacement_factor,
stitle='Nodal Temperature',
**kwargs)
def nodal_thermal_strain(self, rnum):
"""Nodal component plastic strains. This record contains
strains in the order X, Y, Z, XY, YZ, XZ, EQV, and eswell
(element swelling strain). Plastic strains are always values
at the integration points moved to the nodes.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
Returns
-------
nnum : np.ndarray
ANSYS node numbers.
thermal_strain : np.ndarray
Nodal component thermal strains. Array is in the order
X, Y, Z, XY, YZ, XZ, EQV, ESWELL
Examples
--------
Load the nodal thermal strain for the first solution
>>> import pyansys
>>> rst = pyansys.read_binary('file.rst')
>>> nnum, thermal_strain = rst.nodal_thermal_strain(0)
"""
return self._nodal_result(rnum, 'ETH')
def plot_nodal_thermal_strain(self, rnum, comp,
stitle='Nodal Thermal Strain',
show_displacement=False,
displacement_factor=1,
node_components=None,
sel_type_all=True, **kwargs):
"""Plot nodal component plastic strains.
Parameters
----------
rnum : int
Result number
comp : str
Thermal strain component to display. Available options:
- ``"X"``
- ``"Y"``
- ``"Z"``
- ``"XY"``
- ``"YZ"``
- ``"XZ"``
- ``"EQV"``
- ``"ESWELL"``
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
node_components : list, optional
Accepts either a string or a list of strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default True.
**kwargs : keyword arguments
Optional keyword arguments. See help(pyvista.plot)
Examples
--------
Plot thermal strain for result 0 of verification manual example 33
>>> import pyansys
>>> result = pyansys.download_verification_result(33)
>>> result.plot_nodal_thermal_strain(0)
"""
available_comps = ['X', 'Y', 'Z', 'XY', 'YZ', 'XZ', 'EQV', 'ESWELL']
return self._plot_nodal_result(rnum, 'ETH', comp, available_comps,
show_displacement=show_displacement,
displacement_factor=displacement_factor,
node_components=node_components,
sel_type_all=sel_type_all,
stitle=stitle,
**kwargs)
def nodal_elastic_strain(self, rnum):
"""Nodal component elastic strains. This record contains
strains in the order X, Y, Z, XY, YZ, XZ, EQV.
Elastic strains can be nodal values extrapolated from the
integration points or values at the integration points moved to
the nodes.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
Returns
-------
nnum : np.ndarray
ANSYS node numbers.
elastic_strain : np.ndarray
Nodal component elastic strains. Array is in the order
X, Y, Z, XY, YZ, XZ, EQV.
Examples
--------
Load the nodal elastic strain for the first solution.
>>> import pyansys
>>> rst = pyansys.read_binary('file.rst')
>>> nnum, elastic_strain = rst.nodal_elastic_strain(0)
"""
return self._nodal_result(rnum, 'EEL')
def plot_nodal_elastic_strain(self, rnum, comp,
stitle='Nodal Elastic Strain',
show_displacement=False,
displacement_factor=1,
node_components=None,
sel_type_all=True, **kwargs):
"""Plot nodal component elastic strains.
Parameters
----------
rnum : int
Result number
comp : str
Elastic strain component to display. Available options:
- ``"X"``
- ``"Y"``
- ``"Z"``
- ``"XY"``
- ``"YZ"``
- ``"XZ"``
- ``"EQV"``
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
node_components : list, optional
Accepts either a string or a list of strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default True.
**kwargs : keyword arguments
Optional keyword arguments. See help(pyvista.plot)
Examples
--------
Plot elastic strain for a static pontoon model
>>> import pyansys
>>> result = pyansys.download_pontoon()
>>> result.plot_nodal_elastic_strain(0)
"""
available_comps = ['X', 'Y', 'Z', 'XY', 'YZ', 'XZ', 'EQV']
stitle = ' '.join([comp.upper(), stitle])
return self._plot_nodal_result(rnum, 'EEL',
comp,
available_comps,
show_displacement=show_displacement,
displacement_factor=displacement_factor,
node_components=node_components,
sel_type_all=sel_type_all,
stitle=stitle,
**kwargs)
def nodal_plastic_strain(self, rnum):
"""Nodal component plastic strains. This record contains
strains in the order X, Y, Z, XY, YZ, XZ, EQV.
Plastic strains are always values at the integration points
moved to the nodes.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
Returns
-------
nnum : np.ndarray
ANSYS node numbers.
plastic_strain : np.ndarray
Nodal component plastic strains. Array is in the order
X, Y, Z, XY, YZ, XZ, EQV.
Examples
--------
Load the nodal plastic strain for the first solution.
>>> import pyansys
>>> rst = pyansys.read_binary('file.rst')
>>> nnum, plastic_strain = rst.nodal_plastic_strain(0)
"""
return self._nodal_result(rnum, 'EPL')
def plot_nodal_plastic_strain(self, rnum, comp,
stitle='Nodal Plastic Strain',
show_displacement=False,
displacement_factor=1,
node_components=None,
sel_type_all=True, **kwargs):
"""Plot nodal component plastic strains.
Parameters
----------
rnum : int
Result number
comp : str
Plastic strain component to display. Available options:
- ``"X"``
- ``"Y"``
- ``"Z"``
- ``"XY"``
- ``"YZ"``
- ``"XZ"``
- ``"EQV"``
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
node_components : list, optional
Accepts either a string or a list of strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default True.
**kwargs : keyword arguments
Optional keyword arguments. See help(pyvista.plot)
Examples
--------
Plot plastic strain for a static pontoon model
>>> import pyansys
>>> result = pyansys.download_pontoon()
>>> result.plot_nodal_plastic_strain(0)
"""
available_comps = ['X', 'Y', 'Z', 'XY', 'YZ', 'XZ', 'EQV']
stitle = ' '.join([comp.upper(), stitle])
return self._plot_nodal_result(rnum, 'EPL',
comp,
available_comps,
show_displacement=show_displacement,
displacement_factor=displacement_factor,
node_components=node_components,
sel_type_all=sel_type_all,
stitle=stitle,
**kwargs)
def _plot_nodal_result(self, rnum, result_type, comp, available_comps,
show_displacement=False, displacement_factor=1,
node_components=None,
sel_type_all=True, **kwargs):
"""Plot nodal results"""
comp = comp.upper()
if comp not in available_comps:
raise ValueError('Invalid component. Pick one of the following: %s' %
str(available_comps))
component_index = available_comps.index(comp)
_, result = self._nodal_result(rnum, result_type)
scalars = result[:, component_index]
if node_components:
grid, ind = self._extract_node_components(node_components, sel_type_all)
scalars = scalars[ind]
else:
grid = self.grid
return self._plot_point_scalars(scalars, grid=grid, rnum=rnum,
show_displacement=show_displacement,
displacement_factor=displacement_factor,
**kwargs)
def _animate_time_solution(self, result_type, index=0, frame_rate=10,
show_displacement=True, displacement_factor=1,
off_screen=None):
"""Animate time solution result"""
# load all results
results = []
for i in range(self.nsets):
results.append(self._nodal_result(i, result_type)[1][:, index])
if show_displacement:
disp = []
for i in range(self.nsets):
disp.append(self.nodal_solution(i)[1][:, :3]*displacement_factor)
mesh = self.grid.copy()
results = np.array(results)
if np.all(np.isnan(results)):
raise ValueError('Result file contains no %s records' %
ELEMENT_INDEX_TABLE_INFO[result_type.upper()])
# prepopulate mesh with data
mesh['data'] = results[0]
# set default range
rng = [results.min(), results.max()]
t_wait = 1/frame_rate
def plot_thread():
plotter = pv.Plotter(off_screen=off_screen)
plotter.add_mesh(mesh, scalars='data', rng=rng)
plotter.show(auto_close=False, interactive_update=True, interactive=False)
text_actor = plotter.add_text('Result 1')
while not plotter.q_pressed:
for i in range(self.nsets):
mesh['data'] = results[i]
if show_displacement:
mesh.points = self.grid.points + disp[i]
# if interactive:
plotter.update(30, force_redraw=True)
if hasattr(text_actor, 'SetInput'):
text_actor.SetInput('Result %d' % (i + 1))
else:
text_actor.SetText(0, 'Result %d' % (i + 1))
if plotter.q_pressed:
break
time.sleep(t_wait)
if off_screen:
break
plotter.close()
thread = Thread(target=plot_thread)
thread.start()
@property
def available_results(self):
"""Prints available element result types and returns those keys"""
ele_ind_table, _, _ = self._element_solution_header(0)
# get the keys from the first element (not ideal...)
mask = self.read_record(ele_ind_table[0]) > 0
keys = [ELEMENT_INDEX_TABLE_KEYS[i] for i in mask.nonzero()[0]]
available = {}
for key in keys:
available[key] = ELEMENT_INDEX_TABLE_INFO[key]
return available
def pol2cart(rho, phi):
""" Convert cylindrical to cartesian """
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return x, y
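# Usage sketch (hedged): a point at radius 1, angle pi/2 maps to (0, 1).
#
#     x, y = pol2cart(1.0, np.pi / 2)
#     # x is ~0.0 (within floating point error), y is ~1.0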
def is_int(value):
""" Return true if can be parsed as an int """
try:
int(value)
return True
except (TypeError, ValueError):
return False
def trans_to_matrix(trans):
""" Convert a numpy.ndarray to a vtk.vtkMatrix4x4 """
matrix = vtk.vtkMatrix4x4()
for i in range(trans.shape[0]):
for j in range(trans.shape[1]):
matrix.SetElement(i, j, trans[i, j])
return matrix
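# Usage sketch (hedged): convert a 4x4 identity array; vtkMatrix4x4
# exposes GetElement(i, j) to read entries back.
#
#     mat = trans_to_matrix(np.eye(4))
#     assert mat.GetElement(0, 0) == 1.0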
def transform(points, trans):
"""In-place 3d transformation of a points array given a 4x4
transformation matrix.
Parameters
----------
points : np.ndarray
Points to transform in place.
trans : np.ndarray or vtk.vtkMatrix4x4
4x4 transformation matrix.
"""
if isinstance(trans, vtk.vtkMatrix4x4):
trans = pv.trans_from_matrix(trans)
_binary_reader.affline_transform(points, trans)
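# Usage sketch (hedged; ``points`` must be a float array and is modified
# in place by the internal ``_binary_reader`` routine):
#
#     pts = np.array([[1.0, 2.0, 3.0]])
#     transform(pts, np.eye(4))  # the identity matrix leaves pts unchanged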
|
base_camera_rtmp.py | import time
import threading
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
class CameraEvent(object):
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self):
self.events = {}
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
if not event[0].isSet():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class BaseCamera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
event = CameraEvent()
def __init__(self):
"""Start the background camera thread if it isn't running yet."""
self.thread = None # background thread that reads frames from camera
self.frame = None # current frame is stored here by background thread
self.last_access = 0 # time of last client access to the camera
self.event = CameraEvent()
if self.thread is None:
self.last_access = time.time()
# start background frame thread
self.thread = threading.Thread(target=self._thread)
self.thread.start()
# wait until frames are available
while self.get_frame() is None:
time.sleep(0)
def get_frame(self):
"""Return the current camera frame."""
self.last_access = time.time()
# wait for a signal from the camera thread
self.event.wait()
self.event.clear()
return self.frame
@staticmethod
def frames():
""""Generator that returns frames from the camera."""
raise RuntimeError('Must be implemented by subclasses.')
def _thread(self):
"""Camera background thread."""
print('Starting camera thread.')
frames_iterator = self.frames()
for frame in frames_iterator:
self.frame = frame
self.event.set() # send signal to clients
time.sleep(0)
# if there hasn't been any clients asking for frames in
# the last 10 seconds then stop the thread
if time.time() - self.last_access > 10:
frames_iterator.close()
print('Stopping camera thread due to inactivity.')
break
self.thread = None
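# Minimal subclass sketch (hedged; emits a static byte payload rather than
# real camera frames, just to illustrate the frames() contract):
#
#     class DummyCamera(BaseCamera):
#         @staticmethod
#         def frames():
#             while True:
#                 yield b'frame-bytes'
#                 time.sleep(1 / 30)  # ~30 fps pacing
#
#     camera = DummyCamera()
#     frame = camera.get_frame()  # blocks until the background thread signals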
|
wallet_multiwallet.py | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
def test_load_unload(node, name):
global got_loading_error
for _ in range(10):
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 120
self.extra_args = [["-nowallet"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': self.default_wallet_name }] })
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
in_wallet_dir.append('w7') # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Too many levels of symbolic links', 'Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w2")
assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w1")
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
|
archive.py | """archive.py - Wrapper around 7z binary for extraction of individual files.
Note:
Every call to ``extract()`` must be followed by a call to ``join()``
"""
import os
import re
import subprocess
import tempfile
import threading
from .utils import which, is_exe
DEFAULT_BIN = "7z"
"""str: Default binary name for 7z"""
class Archive7z(object):
"""Wrapper around 7z binary for extraction of individual files.
Wrapper around 7z binary for extraction of individual files either
to disk or to a named pipe.
Note:
Every call to ``extract()`` must be followed by a call to ``join()``
"""
outpath = ""
_xfilename = ""
_outfile = None
_thread = None
_pipe = False
_subprocess = None
def __init__(self, filename, cmd="", config=None):
"""Construct wrapper around 7z binary.
Note:
``cmd`` is an optional parameter; when not specified,
the system path will be searched for the 7z binary
Args:
filename (str): filename of 7z archive
cmd (str): path to 7z binary
config (dict): configuration via json dict
Attributes:
outpath (str): path to extracted file/named pipe
"""
self.filename = filename
if config:
command = config["cmd"];
else:
command = cmd
if not command:
command = which(DEFAULT_BIN)
if not is_exe(command):
raise OSError("Could not find 7z binary")
self.cmd = command
def _make_pipe(self, fifopath):
self.outpath = tempfile.mktemp(suffix=".fifo", dir=fifopath)
try:
os.mkfifo(self.outpath)
except OSError as e:
raise OSError("Failed to create FIFO: {}".format(e))
else:
return self.outpath
def _extract(self):
self._outfile = open(self.outpath, "w")
cmd = [self.cmd, "e", "-so", self.filename, self._xfilename]
self._subprocess = subprocess.Popen(cmd, stdout=self._outfile)
self._subprocess.communicate()
self._outfile.close()
if self._pipe:
os.remove(self.outpath)
self._outfile = None
def list_files(self, ext=None):
"""Return list of files contained in 7z archive.
Args:
ext (str): file extension filter (e.g. ".xml")
Returns:
list: list of files contained in 7z archive
Todo:
handle directories properly
"""
cmd = [self.cmd, "l", self.filename]
self._subprocess = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = self._subprocess.communicate()
list_ = [m[2] for m in re.findall(r"^\d{4}(-\d{2}){2}.*(\d\s+)(.*)$", out.decode('utf8'), re.M)]
list_ = list_[:-1]
if ext:
if not ext[0] == ".":
ext = "." + ext
list_ = [filename for filename in list_ if os.path.splitext(filename)[1] == ext]
return list_
def extract(self, xfilename, outdir, pipe=False):
"""Extract file from 7z achieve to file system or named pipe.
Note:
Every call to ``extract()`` must be followed by a call to ``join()``
Directories are not handled properly yet
Args:
xfilename (str): filename of the file to be extracted from the archive
outdir (str): directory to extract the file to
pipe (bool): extract to an automatically named fifo (named pipe)
Returns:
str: path to extracted filename/named pipe
"""
if not os.path.isdir(outdir):
raise OSError("Output directory does not exist {}".format(outdir))
else:
self._pipe = pipe
self.outdir = outdir
self._xfilename = xfilename
filename = os.path.basename(xfilename)
self.outpath = os.path.join(outdir, filename)
if pipe:
self.outpath = self._make_pipe(outdir)
self._thread = threading.Thread(target=self._extract)
self._thread.start()
return self.outpath
def extract_multiple(self, outdir, files=None):
"""Extract multiple files from 7z archive to file system.
Extract multiple files from the 7z archive to the file system;
if ``files`` is not specified, **all** files will be extracted.
Note:
Directories are not handled properly yet
Args:
outdir (str): directory to extract the files to
files (list): list of files to extract; defaults to all files in the archive
Returns:
None
"""
if not files:
files = self.list_files()
for file_ in files:
self.extract(file_, outdir)
self.join()
def join(self):
"""Wait for extraction thread to complete."""
if self._thread:
self._thread.join()
self._thread = None
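# Usage sketch (hedged; assumes a local "sample.7z" archive and that the
# 7z binary is on the system path):
#
#     archive = Archive7z("sample.7z")
#     for name in archive.list_files(ext=".xml"):
#         outpath = archive.extract(name, "/tmp")
#         archive.join()  # every extract() must be followed by join()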
|
reader.py | import argparse
import json
import logging
import multiprocessing
import os
import re
LOGGER_FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=LOGGER_FORMAT, datefmt='[%H:%M:%S]')
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
arg_parser = argparse.ArgumentParser(
description='Aggregate queries from log files in the given log directory and write results to the given output file')
arg_parser.add_argument('--directory', '-d', type=str, help='path to log directory', required=True)
arg_parser.add_argument('--output', '-o', type=str, help='path to output file', default='output.txt')
arg_parser.add_argument('--processors', '-p', type=int, help='number of processors to be used', default=8)
class Reader:
"""
Reader class to read and aggregate statistics from a log file
"""
def __init__(self, filename, queue):
"""
:param filename: str - name of currently processed file
:param queue: multiprocessing.Queue - queue to hold results
"""
self.filename = filename
self.queue = queue
self.statistic = {'valid': {}, 'non_valid': {}}
@staticmethod
def get_id_from_query(query_str):
"""
read all ids from the query string and return them as a set to avoid duplicated ids
:param query_str: str - query string from the query
:return: set - set of unique ids
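e.g. get_id_from_query('id=1&id=2&id=1&name=x') returns {1, 2}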
"""
ids = set()
for data in query_str.split('&'):
tmp = data.split('=')
if tmp[0] == 'id':
ids.add(int(tmp[1]))
return ids
@staticmethod
def get_day_time(timestamp):
"""
:param timestamp: int - UNIX timestamp
:return: int - UNIX timestamp as day (UTC)
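e.g. 1609462861 (2021-01-01T01:01:01Z) truncates to 1609459200 (2021-01-01T00:00:00Z)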
"""
return timestamp - timestamp % 86400
def process_query(self, data):
"""
extend the current statistics with data from the current query
:param data: dict - current query
:return: None
"""
query_ids = self.get_id_from_query(data['query_string'])
is_valid = 'valid' if set(data['ids']) == query_ids else 'non_valid'
day = self.get_day_time(data['timestamp'])
if day not in self.statistic[is_valid]:
self.statistic[is_valid][day] = {'create': 0, 'update': 0, 'delete': 0}
try:
self.statistic[is_valid][day][data['event_type']] += 1
except Exception as e:
log.error("Error while processing 'event_type': {}, file: {}".format(e, self.filename))
def read_log(self):
"""
read the log file line by line
:return: dict - parsed query, yielded one per line
"""
with open(self.filename, 'r') as f:
for line in f:
try:
data = json.loads(line)
yield data
except Exception as e:
log.error("Error while reading line in log file: {}, file: {}".format(e, self.filename))
def start(self):
"""
process log file
:return: None
"""
log.info("Processing '{}'".format(self.filename))
for data in self.read_log():
try:
self.process_query(data)
except Exception as e:
log.error("Error while reading line in log file: {}, file: {}".format(e, self.filename))
self.queue.put(self.statistic)
log.info("Processing '{}' finished".format(self.filename))
def is_log_file(filename):
"""
Check file to be log file
:param filename: str
:return: bool
"""
if re.fullmatch(r'^\d+\.log$', filename):
return True
return False
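# Example (hedged): is_log_file('123.log') -> True,
# while is_log_file('notes.log') and is_log_file('123.log.bak') -> False.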
def process_files(files, queue):
"""
:param files: list - list of files to be processed in multiprocessing
:param queue: multiprocessing.Queue
:return: list - list of dictionaries with collected data
"""
jobs = []
data = []
for cur_file in files:
r = Reader(cur_file.path, queue)
p = multiprocessing.Process(target=r.start)
p.start()
jobs.append(p)
for p in jobs:
data.append(queue.get())
p.join()
return data
def update_result(res, new_data):
"""
update current result dictionary with newly collected
:param res: dict
:param new_data: dict
:return:
"""
if not res:
res = new_data
return res
for is_valid, days in new_data.items():
for day, events in days.items():
if day not in res[is_valid]:
res[is_valid][day] = events
continue
for event, quantity in events.items():
res[is_valid][day][event] += quantity
return res
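# Merge sketch (hedged): overlapping days sum their per-event counts, new
# days are copied over unchanged.
#
#     res = {'valid': {0: {'create': 1, 'update': 0, 'delete': 0}}, 'non_valid': {}}
#     new = {'valid': {0: {'create': 2, 'update': 1, 'delete': 0}}, 'non_valid': {}}
#     update_result(res, new)  # res['valid'][0]['create'] == 3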
def main(directory, output, processors):
"""
main code here. Scan the directory via os.scandir (to use an iterator) and process log files in batches,
where the batch size equals the number of processors passed via command-line arguments
:param directory: str - name of log files directory
:param output: str - name of output file
:param processors: int - quantity of workers to process simultaneously
:return: None
"""
result = {}
queue = multiprocessing.Queue()
files_to_process = []
for file in os.scandir(directory):
if not is_log_file(file.name):
continue
files_to_process.append(file)
if len(files_to_process) < processors:
continue
for data in process_files(files_to_process, queue):
result = update_result(result, data)
files_to_process = []
if files_to_process:
for data in process_files(files_to_process, queue):
result = update_result(result, data)
with open(output, 'w') as f:
f.write(json.dumps(result, indent=4, sort_keys=True))
if __name__ == '__main__':
args = arg_parser.parse_args()
main(args.directory, args.output, args.processors)
|
Misc.py | ## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2019, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
import sys
import string
import threading
import time
import re
import pickle
import array
import shutil
import filecmp
from random import sample
from struct import pack
import uuid
import subprocess
import tempfile
from collections import OrderedDict
import Common.LongFilePathOs as os
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from Common.DataType import *
from Common.BuildToolError import *
from CommonDataClass.DataClass import *
from Common.Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
from CommonDataClass.Exceptions import BadExpression
from Common.caching import cached_property
ArrayIndex = re.compile(r"\[\s*[0-9a-fA-FxX]*\s*\]")
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile(r"\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
## regular expressions for map file processing
startPatternGeneral = re.compile("^Start[' ']+Length[' ']+Name[' ']+Class")
addressPatternGeneral = re.compile(r"^Address[' ']+Publics by Value[' ']+Rva\+Base")
valuePatternGcc = re.compile(r'^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$')
pcdPatternGcc = re.compile(r'^([\da-fA-Fx]+) +([\da-fA-Fx]+)')
secReGeneral = re.compile(r'^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
StructPattern = re.compile(r'[_a-zA-Z][0-9A-Za-z_]*$')
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
#
# If a module is built more than once with different PCDs or library classes
# a temporary INF file with same content is created, the temporary file is removed
# when build exits.
#
_TempInfs = []
def GetVariableOffset(mapfilepath, efifilepath, varnames):
""" Parse map file to get variable offset in current EFI file
@param mapfilepath Map file absolute path
@param efifilepath: EFI binary file full path
@param varnames iterable container whose elements are variable names to be searched
@return List whose elements are tuples of variable name and raw offset
"""
lines = []
try:
f = open(mapfilepath, 'r')
lines = f.readlines()
f.close()
except:
return None
if len(lines) == 0: return None
firstline = lines[0].strip()
if (firstline.startswith("Archive member included ") and
firstline.endswith(" file (symbol)")):
return _parseForGCC(lines, efifilepath, varnames)
if firstline.startswith("# Path:"):
return _parseForXcode(lines, efifilepath, varnames)
return _parseGeneral(lines, efifilepath, varnames)
def _parseForXcode(lines, efifilepath, varnames):
status = 0
ret = []
for line in lines:
line = line.strip()
if status == 0 and line == "# Symbols:":
status = 1
continue
if status == 1 and len(line) != 0:
for varname in varnames:
if varname in line:
# cannot pregenerate this RegEx since it uses varname from varnames.
m = re.match(r'^([\da-fA-FxX]+)([\s\S]*)([_]*%s)$' % varname, line)
if m is not None:
ret.append((varname, m.group(1)))
return ret
def _parseForGCC(lines, efifilepath, varnames):
""" Parse map file generated by GCC linker """
status = 0
sections = []
varoffset = []
for index, line in enumerate(lines):
line = line.strip()
# state machine transition
if status == 0 and line == "Memory Configuration":
status = 1
continue
elif status == 1 and line == 'Linker script and memory map':
status = 2
continue
elif status == 2 and line == 'START GROUP':
status = 3
continue
# status handler
if status == 3:
m = valuePatternGcc.match(line)
if m is not None:
sections.append(m.groups(0))
for varname in varnames:
Str = ''
m = re.match("^.data.(%s)" % varname, line)
if m is not None:
m = re.match(".data.(%s)$" % varname, line)
if m is not None:
Str = lines[index + 1]
else:
Str = line[len(".data.%s" % varname):]
if Str:
m = pcdPatternGcc.match(Str.strip())
if m is not None:
varoffset.append((varname, int(m.groups(0)[0], 16), int(sections[-1][1], 16), sections[-1][0]))
if not varoffset:
return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
#redirection
redirection = 0
for efisec in efisecs:
for section in sections:
if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
redirection = int(section[1], 16) - efisec[1]
ret = []
for var in varoffset:
for efisec in efisecs:
if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
return ret
def _parseGeneral(lines, efifilepath, varnames):
status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
secs = [] # key = section name
varoffset = []
symRe = re.compile(r'^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\w\?@\$-]+) +([\da-fA-F]+)', re.UNICODE)
for line in lines:
line = line.strip()
if startPatternGeneral.match(line):
status = 1
continue
if addressPatternGeneral.match(line):
status = 2
continue
if line.startswith("entry point at"):
status = 3
continue
if status == 1 and len(line) != 0:
m = secReGeneral.match(line)
assert m is not None, "Fail to parse the section in map file , line is %s" % line
sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
if status == 2 and len(line) != 0:
for varname in varnames:
m = symRe.match(line)
assert m is not None, "Fail to parse the symbol in map file, line is %s" % line
sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
sec_no = int(sec_no, 16)
sym_offset = int(sym_offset, 16)
vir_addr = int(vir_addr, 16)
# cannot pregenerate this RegEx since it uses varname from varnames.
m2 = re.match(r'^[_]*(%s)' % varname, sym_name)
if m2 is not None:
# found a binary PCD entry in the map file
for sec in secs:
if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
if not varoffset: return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
ret = []
for var in varoffset:
index = 0
for efisec in efisecs:
index = index + 1
if var[1].strip() == efisec[0].strip():
ret.append((var[0], hex(efisec[2] + var[2])))
elif var[4] == index:
ret.append((var[0], hex(efisec[2] + var[2])))
return ret
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
Filename = os.path.split(Path.File)[1]
if '.' in Filename:
Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
else:
Filename = BaseName + Path.BaseName
DbDir = os.path.split(GlobalData.gDatabasePath)[0]
#
# A temporary INF is copied to database path which must have write permission
# The temporary will be removed at the end of build
# In case of name conflict, the file name is
# FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
#
TempFullPath = os.path.join(DbDir,
Filename)
RtPath = PathClass(Path.File, Workspace)
#
# Modify the full path to temporary path, keep other unchanged
#
# To build same module more than once, the module path with FILE_GUID overridden has
# the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
# in DSC which is used as relative path by C files and other files in INF.
# A trick was used: all module paths are PathClass instances, after the initialization
# of PathClass, the PathClass.Path is overridden by the temporary INF path.
#
# The reason for creating a temporary INF is:
# Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
# the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
# A different key for the same module is needed to create different output directory,
# retrieve overridden PCDs, library instances.
#
# The BaseName is the FILE_GUID which is also the output directory name.
#
#
RtPath.Path = TempFullPath
RtPath.BaseName = BaseName
RtPath.OriginalPath = Path
#
# If file exists, compare contents
#
if os.path.exists(TempFullPath):
with open(str(Path), 'rb') as f1, open(TempFullPath, 'rb') as f2:
if f1.read() == f2.read():
return RtPath
_TempInfs.append(TempFullPath)
shutil.copy2(str(Path), TempFullPath)
return RtPath
## Remove temporarily created INFs whose paths were saved in _TempInfs
#
def ClearDuplicatedInf():
while _TempInfs:
File = _TempInfs.pop()
if os.path.exists(File):
os.remove(File)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
GuidList = Guid.split('-')
Result = '{'
for Index in range(0, 3, 1):
Result = Result + '0x' + GuidList[Index] + ', '
Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
for Index in range(0, 12, 2):
Result = Result + ', 0x' + GuidList[4][Index:Index + 2]
Result += '}}'
return Result
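# Example (hedged):
#   GuidStringToGuidStructureString('0d1b936f-68f3-4589-afcc-fb8b7aebc836')
#   returns '{0x0d1b936f, 0x68f3, 0x4589, {0xaf, 0xcc, 0xfb, 0x8b, 0x7a, 0xeb, 0xc8, 0x36}}'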
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 16:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[3], 16),
int(guidValueList[2], 16),
int(guidValueList[1], 16),
int(guidValueList[0], 16),
int(guidValueList[5], 16),
int(guidValueList[4], 16),
int(guidValueList[7], 16),
int(guidValueList[6], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16),
int(guidValueList[11], 16),
int(guidValueList[12], 16),
int(guidValueList[13], 16),
int(guidValueList[14], 16),
int(guidValueList[15], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
if not GlobalData.gGuidCFormatPattern.match(GuidValue):
return ''
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
if Directory is None or Directory.strip() == "":
return True
try:
if not os.access(Directory, os.F_OK):
os.makedirs(Directory)
except:
return False
return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
if Directory is None or Directory.strip() == "" or not os.path.exists(Directory):
return
if Recursively:
CurrentDirectory = os.getcwd()
os.chdir(Directory)
for File in os.listdir("."):
if os.path.isdir(File):
RemoveDirectory(File, Recursively)
else:
os.remove(File)
os.chdir(CurrentDirectory)
os.rmdir(Directory)
## Store content in file
#
# This method saves the file only when its content has changed. This is
# quite useful for a "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True, FileLock=None):
if os.path.exists(File):
if IsBinaryFile:
try:
with open(File, "rb") as f:
if Content == f.read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
else:
try:
with open(File, "r") as f:
if Content == f.read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
DirName = os.path.dirname(File)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
OpenMode = "w"
if IsBinaryFile:
OpenMode = "wb"
# use the default file_lock if no new lock is passed in
if not FileLock:
FileLock = GlobalData.file_lock
if FileLock:
FileLock.acquire()
if GlobalData.gIsWindows and not os.path.exists(File):
# write temp file, then rename the temp file to the real file
# to make sure the file is immediately saved to disk
with tempfile.NamedTemporaryFile(OpenMode, dir=os.path.dirname(File), delete=False) as tf:
tf.write(Content)
tempname = tf.name
try:
os.rename(tempname, File)
except IOError as X:
if GlobalData.gBinCacheSource:
EdkLogger.quiet("[cache error]:fails to save file with error: %s" % (X))
else:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
finally:
if FileLock:
FileLock.release()
else:
try:
with open(File, OpenMode) as Fd:
Fd.write(Content)
except IOError as X:
if GlobalData.gBinCacheSource:
EdkLogger.quiet("[cache error]:fails to save file with error: %s" % (X))
else:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
finally:
if FileLock:
FileLock.release()
return True
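# Illustrative usage sketch (hypothetical paths): the second call is a no-op
# because the on-disk content already matches, so make-style dependency checks
# see an unchanged timestamp.
#
#   SaveFileOnChange("Build/AutoGen.c", "// generated\n", IsBinaryFile=False)  # -> True, file written
#   SaveFileOnChange("Build/AutoGen.c", "// generated\n", IsBinaryFile=False)  # -> False, nothing written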
## Copy source file only if it is different from the destination file
#
# This method copies the file only if the source and destination file contents
# differ, which avoids redundant file writes.
#
# @param SrcFile The path of source file
# @param Dst The path of destination file or folder
#
# @retval True The two files content are different and the file is copied
# @retval False No copy actually happened
#
def CopyFileOnChange(SrcFile, Dst, FileLock=None):
if not os.path.exists(SrcFile):
return False
if os.path.isdir(Dst):
DstFile = os.path.join(Dst, os.path.basename(SrcFile))
else:
DstFile = Dst
if os.path.exists(DstFile) and filecmp.cmp(SrcFile, DstFile, shallow=False):
return False
DirName = os.path.dirname(DstFile)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
    # use the default file_lock if no new lock object is passed in
if not FileLock:
FileLock = GlobalData.file_lock
if FileLock:
FileLock.acquire()
    # os.replace (Python 3) and os.rename (Python 2) are atomic operations.
    # We use them to make the file copy atomic: first copy the source to a
    # temp file in the destination folder, then replace or rename the temp
    # file to the destination file.
with tempfile.NamedTemporaryFile(dir=DirName, delete=False) as tf:
shutil.copy(SrcFile, tf.name)
tempname = tf.name
try:
if hasattr(os, 'replace'):
os.replace(tempname, DstFile)
else:
            # os.rename requires removing the dst first on Windows, otherwise OSError is raised.
if GlobalData.gIsWindows and os.path.exists(DstFile):
os.remove(DstFile)
os.rename(tempname, DstFile)
except IOError as X:
if GlobalData.gBinCacheSource:
EdkLogger.quiet("[cache error]:fails to copy file with error: %s" % (X))
else:
EdkLogger.error(None, FILE_COPY_FAILURE, ExtraData='IOError %s' % X)
finally:
if FileLock:
FileLock.release()
return True
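# Illustrative usage sketch (hypothetical paths): the copy goes through a temp
# file in the destination directory plus os.replace/os.rename, so readers never
# observe a half-written destination file.
#
#   CopyFileOnChange("Build/FV/FVMAIN.fv", "Cache/")   # -> True, copied atomically
#   CopyFileOnChange("Build/FV/FVMAIN.fv", "Cache/")   # -> False, contents identical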
## Retrieve and cache the real path name in file system
#
# @param Root The root directory that paths are relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache:
_CACHE_ = set()
_UPPER_CACHE_ = {}
def __init__(self, Root):
self._Root = Root
for F in os.listdir(Root):
self._CACHE_.add(F)
self._UPPER_CACHE_[F.upper()] = F
# =[] operator
def __getitem__(self, Path):
Path = Path[len(os.path.commonprefix([Path, self._Root])):]
if not Path:
return self._Root
if Path and Path[0] == os.path.sep:
Path = Path[1:]
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
UpperPath = Path.upper()
if UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
IndexList = []
LastSepIndex = -1
SepIndex = Path.find(os.path.sep)
while SepIndex > -1:
Parent = UpperPath[:SepIndex]
if Parent not in self._UPPER_CACHE_:
break
LastSepIndex = SepIndex
SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
if LastSepIndex == -1:
return None
Cwd = os.getcwd()
os.chdir(self._Root)
SepIndex = LastSepIndex
while SepIndex > -1:
Parent = Path[:SepIndex]
ParentKey = UpperPath[:SepIndex]
if ParentKey not in self._UPPER_CACHE_:
os.chdir(Cwd)
return None
if Parent in self._CACHE_:
ParentDir = Parent
else:
ParentDir = self._UPPER_CACHE_[ParentKey]
for F in os.listdir(ParentDir):
Dir = os.path.join(ParentDir, F)
self._CACHE_.add(Dir)
self._UPPER_CACHE_[Dir.upper()] = Dir
SepIndex = Path.find(os.path.sep, SepIndex + 1)
os.chdir(Cwd)
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
elif UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
return None
def RealPath(File, Dir='', OverrideDir=''):
NewFile = os.path.normpath(os.path.join(Dir, File))
NewFile = GlobalData.gAllFiles[NewFile]
if not NewFile and OverrideDir:
NewFile = os.path.normpath(os.path.join(OverrideDir, File))
NewFile = GlobalData.gAllFiles[NewFile]
return NewFile
## Get GUID value from given packages
#
# @param CName The CName of the GUID
# @param PackageList The list of packages to look up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def GuidValue(CName, PackageList, Inffile = None):
for P in PackageList:
GuidKeys = list(P.Guids.keys())
if Inffile and P._PrivateGuids:
if not Inffile.startswith(P.MetaFile.Dir):
GuidKeys = [x for x in P.Guids if x not in P._PrivateGuids]
if CName in GuidKeys:
return P.Guids[CName]
return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like the following:
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
class TemplateString(object):
_REPEAT_START_FLAG = "BEGIN"
_REPEAT_END_FLAG = "END"
class Section(object):
_LIST_TYPES = [type([]), type(set()), type((0,))]
def __init__(self, TemplateSection, PlaceHolderList):
self._Template = TemplateSection
self._PlaceHolderList = []
# Split the section into sub-sections according to the position of placeholders
if PlaceHolderList:
self._SubSectionList = []
SubSectionStart = 0
#
# The placeholders passed in must be in the format of
#
# PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
#
for PlaceHolder, Start, End in PlaceHolderList:
self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
self._SubSectionList.append(TemplateSection[Start:End])
self._PlaceHolderList.append(PlaceHolder)
SubSectionStart = End
if SubSectionStart < len(TemplateSection):
self._SubSectionList.append(TemplateSection[SubSectionStart:])
else:
self._SubSectionList = [TemplateSection]
def __str__(self):
return self._Template + " : " + str(self._PlaceHolderList)
def Instantiate(self, PlaceHolderValues):
RepeatTime = -1
RepeatPlaceHolders = {}
NonRepeatPlaceHolders = {}
for PlaceHolder in self._PlaceHolderList:
if PlaceHolder not in PlaceHolderValues:
continue
Value = PlaceHolderValues[PlaceHolder]
if type(Value) in self._LIST_TYPES:
if RepeatTime < 0:
RepeatTime = len(Value)
elif RepeatTime != len(Value):
EdkLogger.error(
"TemplateString",
PARAMETER_INVALID,
"${%s} has different repeat time from others!" % PlaceHolder,
ExtraData=str(self._Template)
)
RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
else:
NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
if NonRepeatPlaceHolders:
StringList = []
for S in self._SubSectionList:
if S not in NonRepeatPlaceHolders:
StringList.append(S)
else:
StringList.append(str(NonRepeatPlaceHolders[S]))
else:
StringList = self._SubSectionList
if RepeatPlaceHolders:
TempStringList = []
for Index in range(RepeatTime):
for S in StringList:
if S not in RepeatPlaceHolders:
TempStringList.append(S)
else:
TempStringList.append(str(RepeatPlaceHolders[S][Index]))
StringList = TempStringList
return "".join(StringList)
## Constructor
def __init__(self, Template=None):
self.String = []
self.IsBinary = False
self._Template = Template
self._TemplateSectionList = self._Parse(Template)
## str() operator
#
# @retval string The string replaced
#
def __str__(self):
return "".join(self.String)
## Split the template string into fragments per the ${BEGIN} and ${END} flags
#
# @retval list A list of TemplateString.Section objects
#
def _Parse(self, Template):
SectionStart = 0
SearchFrom = 0
MatchEnd = 0
PlaceHolderList = []
TemplateSectionList = []
while Template:
MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
if not MatchObj:
if MatchEnd <= len(Template):
TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
break
MatchString = MatchObj.group(1)
MatchStart = MatchObj.start()
MatchEnd = MatchObj.end()
if MatchString == self._REPEAT_START_FLAG:
if MatchStart > SectionStart:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
elif MatchString == self._REPEAT_END_FLAG:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
else:
PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
SearchFrom = MatchEnd
return TemplateSectionList
## Replace the string template with dictionary of placeholders and append it to previous one
#
# @param AppendString The string template to append
# @param Dictionary The placeholder dictionaries
#
def Append(self, AppendString, Dictionary=None):
if Dictionary:
SectionList = self._Parse(AppendString)
self.String.append( "".join(S.Instantiate(Dictionary) for S in SectionList))
else:
if isinstance(AppendString,list):
self.String.extend(AppendString)
else:
self.String.append(AppendString)
## Replace the string template with dictionary of placeholders
#
# @param Dictionary The placeholder dictionaries
#
# @retval str The string replaced with placeholder values
#
def Replace(self, Dictionary=None):
return "".join(S.Instantiate(Dictionary) for S in self._TemplateSectionList)
## Progress indicator class
#
# This class makes use of thread to print progress on console.
#
class Progressor:
    # for avoiding an infinite loop
_StopFlag = None
_ProgressThread = None
_CheckInterval = 0.25
## Constructor
#
# @param OpenMessage The string printed before progress characters
# @param CloseMessage The string printed after progress characters
# @param ProgressChar The character used to indicate the progress
# @param Interval The interval in seconds between two progress characters
#
def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
self.PromptMessage = OpenMessage
self.CodaMessage = CloseMessage
self.ProgressChar = ProgressChar
self.Interval = Interval
if Progressor._StopFlag is None:
Progressor._StopFlag = threading.Event()
## Start to print progress character
#
# @param OpenMessage The string printed before progress characters
#
def Start(self, OpenMessage=None):
if OpenMessage is not None:
self.PromptMessage = OpenMessage
Progressor._StopFlag.clear()
if Progressor._ProgressThread is None:
Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
            Progressor._ProgressThread.daemon = False
Progressor._ProgressThread.start()
## Stop printing progress character
#
# @param CloseMessage The string printed after progress characters
#
def Stop(self, CloseMessage=None):
OriginalCodaMessage = self.CodaMessage
if CloseMessage is not None:
self.CodaMessage = CloseMessage
self.Abort()
self.CodaMessage = OriginalCodaMessage
## Thread entry method
def _ProgressThreadEntry(self):
sys.stdout.write(self.PromptMessage + " ")
sys.stdout.flush()
TimeUp = 0.0
        while not Progressor._StopFlag.is_set():
if TimeUp <= 0.0:
sys.stdout.write(self.ProgressChar)
sys.stdout.flush()
TimeUp = self.Interval
time.sleep(self._CheckInterval)
TimeUp -= self._CheckInterval
sys.stdout.write(" " + self.CodaMessage + "\n")
sys.stdout.flush()
## Abort the progress display
@staticmethod
def Abort():
if Progressor._StopFlag is not None:
Progressor._StopFlag.set()
if Progressor._ProgressThread is not None:
Progressor._ProgressThread.join()
Progressor._ProgressThread = None
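# Illustrative usage sketch: the dots are printed from a background thread until
# Stop()/Abort() sets the shared stop flag.
#
#   p = Progressor("Processing", "done")
#   p.Start()
#   ...          # long-running work
#   p.Stop()     # prints " done" and joins the progress thread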
## Dictionary using prioritized list as key
#
class tdict:
_ListType = type([])
_TupleType = type(())
_Wildcard = 'COMMON'
_ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', TAB_STAR, 'PLATFORM']
def __init__(self, _Single_=False, _Level_=2):
self._Level_ = _Level_
self.data = {}
self._Single_ = _Single_
# =[] operator
def __getitem__(self, key):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
elif self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey is None or str(FirstKey).upper() in self._ValidWildcardList:
FirstKey = self._Wildcard
if self._Single_:
return self._GetSingleValue(FirstKey, RestKeys)
else:
return self._GetAllValues(FirstKey, RestKeys)
def _GetSingleValue(self, FirstKey, RestKeys):
Value = None
#print "%s-%s" % (FirstKey, self._Level_) ,
if self._Level_ > 1:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None:
for Key in self.data:
Value = self.data[Key][RestKeys]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None and self._Wildcard in self.data:
#print "Value=None"
Value = self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey]
if Value is None:
for Key in self.data:
Value = self.data[Key]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey]
elif self._Wildcard in self.data:
Value = self.data[self._Wildcard]
return Value
def _GetAllValues(self, FirstKey, RestKeys):
Value = []
if self._Level_ > 1:
if FirstKey == self._Wildcard:
for Key in self.data:
Value += self.data[Key][RestKeys]
else:
if FirstKey in self.data:
Value += self.data[FirstKey][RestKeys]
if self._Wildcard in self.data:
Value += self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
for Key in self.data:
Value.append(self.data[Key])
else:
if FirstKey in self.data:
Value.append(self.data[FirstKey])
if self._Wildcard in self.data:
Value.append(self.data[self._Wildcard])
return Value
## []= operator
def __setitem__(self, key, value):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
else:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey in self._ValidWildcardList:
FirstKey = self._Wildcard
if FirstKey not in self.data and self._Level_ > 0:
self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
if self._Level_ > 1:
self.data[FirstKey][RestKeys] = value
else:
self.data[FirstKey] = value
def SetGreedyMode(self):
self._Single_ = False
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetGreedyMode()
def SetSingleMode(self):
self._Single_ = True
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetSingleMode()
def GetKeys(self, KeyIndex=0):
assert KeyIndex >= 0
if KeyIndex == 0:
return set(self.data.keys())
else:
keys = set()
for Key in self.data:
keys |= self.data[Key].GetKeys(KeyIndex - 1)
return keys
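# Illustrative usage sketch: lookups fall back to the COMMON wildcard when no
# exact key matches at a given level.
#
#   d = tdict(True, 2)                # single-value mode, two key levels
#   d['COMMON', 'COMMON'] = 'default'
#   d['X64', 'DEBUG'] = 'specific'
#   d['X64', 'DEBUG']                 # -> 'specific'
#   d['IA32', 'RELEASE']              # -> 'default' (wildcard fallback)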
def AnalyzePcdExpression(Setting):
RanStr = ''.join(sample(string.ascii_letters + string.digits, 8))
Setting = Setting.replace('\\\\', RanStr).strip()
# There might be escaped quote in a string: \", \\\" , \', \\\'
Data = Setting
# There might be '|' in string and in ( ... | ... ), replace it with '-'
NewStr = ''
InSingleQuoteStr = False
InDoubleQuoteStr = False
Pair = 0
for Index, ch in enumerate(Data):
if ch == '"' and not InSingleQuoteStr:
if Data[Index - 1] != '\\':
InDoubleQuoteStr = not InDoubleQuoteStr
elif ch == "'" and not InDoubleQuoteStr:
if Data[Index - 1] != '\\':
InSingleQuoteStr = not InSingleQuoteStr
elif ch == '(' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair += 1
elif ch == ')' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair -= 1
if (Pair > 0 or InSingleQuoteStr or InDoubleQuoteStr) and ch == TAB_VALUE_SPLIT:
NewStr += '-'
else:
NewStr += ch
FieldList = []
StartPos = 0
while True:
Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
if Pos < 0:
FieldList.append(Setting[StartPos:].strip())
break
FieldList.append(Setting[StartPos:Pos].strip())
StartPos = Pos + 1
for i, ch in enumerate(FieldList):
if RanStr in ch:
FieldList[i] = ch.replace(RanStr,'\\\\')
return FieldList
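# Illustrative result: a "|" inside quotes or parentheses does not split the field.
#
#   AnalyzePcdExpression('"a|b"|VOID*|10')   # -> ['"a|b"', 'VOID*', '10']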
def ParseFieldValue (Value):
def ParseDevPathValue (Value):
if '\\' in Value:
            Value = Value.replace('\\', '/').replace(' ', '')
Cmd = 'DevicePath ' + '"' + Value + '"'
        p = None
        try:
            p = subprocess.Popen(Cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            out, err = p.communicate()
        except Exception as X:
            raise BadExpression("DevicePath: %s" % (str(X)))
        finally:
            subprocess._cleanup()
            # p stays None if Popen itself raised
            if p:
                p.stdout.close()
                p.stderr.close()
if err:
raise BadExpression("DevicePath: %s" % str(err))
out = out.decode()
Size = len(out.split())
out = ','.join(out.split())
return '{' + out + '}', Size
if "{CODE(" in Value:
return Value, len(Value.split(","))
if isinstance(Value, type(0)):
return Value, (Value.bit_length() + 7) // 8
if not isinstance(Value, type('')):
raise BadExpression('Type %s is %s' %(Value, type(Value)))
Value = Value.strip()
if Value.startswith(TAB_UINT8) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 1:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 1
if Value.startswith(TAB_UINT16) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 2:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 2
if Value.startswith(TAB_UINT32) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 4:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 4
if Value.startswith(TAB_UINT64) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 8:
raise BadExpression('Value (%s) Size larger than %d' % (Value, Size))
return Value, 8
if Value.startswith(TAB_GUID) and Value.endswith(')'):
Value = Value.split('(', 1)[1][:-1].strip()
if Value[0] == '{' and Value[-1] == '}':
TmpValue = GuidStructureStringToGuidString(Value)
if not TmpValue:
raise BadExpression("Invalid GUID value string %s" % Value)
Value = TmpValue
if Value[0] == '"' and Value[-1] == '"':
Value = Value[1:-1]
try:
Value = str(uuid.UUID(Value).bytes_le)
if Value.startswith("b'"):
Value = Value[2:-1]
Value = "'" + Value + "'"
except ValueError as Message:
raise BadExpression(Message)
Value, Size = ParseFieldValue(Value)
return Value, 16
if Value.startswith('L"') and Value.endswith('"'):
# Unicode String
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, (len(List) + 1) * 2
if Value.startswith('"') and Value.endswith('"'):
# ASCII String
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List) + 1
if Value.startswith("L'") and Value.endswith("'"):
# Unicode Character Constant
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, len(List) * 2
if Value.startswith("'") and Value.endswith("'"):
# Character constant
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List)
if Value.startswith('{') and Value.endswith('}'):
# Byte array
Value = Value[1:-1]
List = [Item.strip() for Item in Value.split(',')]
List.reverse()
Value = 0
RetSize = 0
for Item in List:
ItemValue, Size = ParseFieldValue(Item)
RetSize += Size
for I in range(Size):
Value = (Value << 8) | ((ItemValue >> 8 * I) & 0xff)
return Value, RetSize
if Value.startswith('DEVICE_PATH(') and Value.endswith(')'):
Value = Value.replace("DEVICE_PATH(", '').rstrip(')')
Value = Value.strip().strip('"')
return ParseDevPathValue(Value)
if Value.lower().startswith('0x'):
try:
Value = int(Value, 16)
except:
raise BadExpression("invalid hex value: %s" % Value)
if Value == 0:
return 0, 1
return Value, (Value.bit_length() + 7) // 8
if Value[0].isdigit():
Value = int(Value, 10)
if Value == 0:
return 0, 1
return Value, (Value.bit_length() + 7) // 8
if Value.lower() == 'true':
return 1, 1
if Value.lower() == 'false':
return 0, 1
return Value, 1
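# Illustrative results (assuming the usual TAB_* constants, e.g. TAB_UINT16 == 'UINT16'):
#
#   ParseFieldValue('0x1234')           # -> (0x1234, 2)
#   ParseFieldValue('UINT16(0x1234)')   # -> (0x1234, 2)
#   ParseFieldValue('"AB"')             # -> (0x4241, 3), chars packed little-endian plus a NUL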
## AnalyzeDscPcd
#
# Analyze a DSC PCD value; since there is no data type info in a DSC, this
# function is used to match the functions (AnalyzePcdData) used for retrieving PCD values from the database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# 3. Dynamic default:
# TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
# TokenSpace.PcdCName|VpdOffset[|VpdValue]
# TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
# TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
# The PCD value needs to be extracted from such strings; the value might be an
# expression containing the "|" operator, which can also appear inside string values.
#
# @param Setting:  String containing the information described above, with "TokenSpace.PcdCName|" stripped
# @param PcdType:  PCD type: feature, fixed, dynamic default, VPD, HII
# @param DataType: The datum type of the PCD: VOID*, UINT, BOOL
# @retval:
#   ValueList: A list containing the fields described above
#   IsValid:   True if the setting conforms to the EBNF, otherwise False
#   Index:     The index where PcdValue is in ValueList
#
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
FieldList = AnalyzePcdExpression(Setting)
IsValid = True
if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
Value = FieldList[0]
Size = ''
if len(FieldList) > 1 and FieldList[1]:
DataType = FieldList[1]
if FieldList[1] != TAB_VOID and StructPattern.match(FieldList[1]) is None:
IsValid = False
if len(FieldList) > 2:
Size = FieldList[2]
if IsValid:
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [str(Value), DataType, str(Size)], IsValid, 0
elif PcdType == MODEL_PCD_FEATURE_FLAG:
Value = FieldList[0]
Size = ''
IsValid = (len(FieldList) <= 1)
return [Value, DataType, str(Size)], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
VpdOffset = FieldList[0]
Value = Size = ''
if not DataType == TAB_VOID:
if len(FieldList) > 1:
Value = FieldList[1]
else:
if len(FieldList) > 1:
Size = FieldList[1]
if len(FieldList) > 2:
Value = FieldList[2]
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [VpdOffset, str(Size), Value], IsValid, 2
elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
IsValid = (3 <= len(FieldList) <= 5)
HiiString = FieldList[0]
Guid = Offset = Value = Attribute = ''
if len(FieldList) > 1:
Guid = FieldList[1]
if len(FieldList) > 2:
Offset = FieldList[2]
if len(FieldList) > 3:
Value = FieldList[3]
if len(FieldList) > 4:
Attribute = FieldList[4]
return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
return [], False, 0
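# Illustrative result for a fixed-at-build PCD setting (the MODEL_* constants
# come from the meta-data table definitions):
#
#   AnalyzeDscPcd('0x1|VOID*|10', MODEL_PCD_FIXED_AT_BUILD)
#   # -> (['0x1', 'VOID*', '10'], True, 0)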
## AnalyzePcdData
#
# Analyze the PCD value, datum type and token number.
# Used to avoid split issues when the value string contains the "|" character
#
# @param[in] Setting: A string containing value/datum type/token number information
#
# @retval ValueList: A list containing the value, datum type and token number.
#
def AnalyzePcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[0] = PtrValue[0]
return ValueList
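# Illustrative result: the quoted-string match keeps an embedded "|" intact
# instead of splitting on it.
#
#   AnalyzePcdData('L"A|B"|VOID*|10')   # -> ['L"A|B"', 'VOID*', '10']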
## Check the format of a PCD value against its datum type
#
# For PCD value setting
#
def CheckPcdDatum(Type, Value):
if Type == TAB_VOID:
ValueRe = re.compile(r'\s*L?\".*\"\s*$')
if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
or (Value.startswith('{') and Value.endswith('}')) or (Value.startswith("L'") or Value.startswith("'") and Value.endswith("'"))
):
return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
", \"...\" or \'...\' for string, L\"...\" or L\'...\' for unicode string" % (Value, Type)
elif ValueRe.match(Value):
            # Check that the chars in the UnicodeString or CString are printable
if Value.startswith("L"):
Value = Value[2:-1]
else:
Value = Value[1:-1]
Printset = set(string.printable)
Printset.remove(TAB_PRINTCHAR_VT)
Printset.add(TAB_PRINTCHAR_BS)
Printset.add(TAB_PRINTCHAR_NUL)
if not set(Value).issubset(Printset):
PrintList = sorted(Printset)
return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
elif Type == 'BOOLEAN':
if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
if Value.startswith('0') and not Value.lower().startswith('0x') and len(Value) > 1 and Value.lstrip('0'):
Value = Value.lstrip('0')
try:
if Value and int(Value, 0) < 0:
return False, "PCD can't be set to negative value[%s] for datum type [%s]" % (Value, Type)
Value = int(Value, 0)
if Value > MAX_VAL_TYPE[Type]:
return False, "Too large PCD value[%s] for datum type [%s]" % (Value, Type)
except:
return False, "Invalid value [%s] of type [%s];"\
" must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
else:
return True, "StructurePcd"
return True, ""
def CommonPath(PathList):
P1 = min(PathList).split(os.path.sep)
P2 = max(PathList).split(os.path.sep)
for Index in range(min(len(P1), len(P2))):
if P1[Index] != P2[Index]:
return os.path.sep.join(P1[:Index])
return os.path.sep.join(P1)
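# Illustrative result (POSIX separators):
#
#   CommonPath(['/opt/edk2/MdePkg', '/opt/edk2/MdeModulePkg'])   # -> '/opt/edk2'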
class PathClass(object):
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
self.Arch = Arch
self.File = str(File)
if os.path.isabs(self.File):
self.Root = ''
self.AlterRoot = ''
else:
self.Root = str(Root)
self.AlterRoot = str(AlterRoot)
# Remove any '.' and '..' in path
if self.Root:
self.Root = mws.getWs(self.Root, self.File)
self.Path = os.path.normpath(os.path.join(self.Root, self.File))
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
# eliminate the side-effect of 'C:'
if self.Root[-1] == ':':
self.Root += os.path.sep
# file path should not start with path separator
if self.Root[-1] == os.path.sep:
self.File = self.Path[len(self.Root):]
else:
self.File = self.Path[len(self.Root) + 1:]
else:
self.Path = os.path.normpath(self.File)
self.SubDir, self.Name = os.path.split(self.File)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.Root:
if self.SubDir:
self.Dir = os.path.join(self.Root, self.SubDir)
else:
self.Dir = self.Root
else:
self.Dir = self.SubDir
if IsBinary:
self.Type = Type
else:
self.Type = self.Ext.lower()
self.IsBinary = IsBinary
self.Target = Target
self.TagName = TagName
self.ToolCode = ToolCode
self.ToolChainFamily = ToolChainFamily
self.OriginalPath = self
## Convert the object of this class to a string
#
# Convert member Path of the class to a string
#
# @retval string Formatted String
#
def __str__(self):
return self.Path
## Override __eq__ function
#
# Check whether PathClass are the same
#
# @retval False The two PathClass are different
# @retval True The two PathClass are the same
#
def __eq__(self, Other):
return self.Path == str(Other)
## Override __cmp__ function
#
# Customize the comparison operation of two PathClass
#
    # @retval 0 The two PathClass are the same
# @retval -1 The first PathClass is less than the second PathClass
    # @retval 1 The first PathClass is bigger than the second PathClass
def __cmp__(self, Other):
OtherKey = str(Other)
SelfKey = self.Path
if SelfKey == OtherKey:
return 0
elif SelfKey > OtherKey:
return 1
else:
return -1
## Override __hash__ function
#
# Use Path as key in hash table
#
# @retval string Key for hash table
#
def __hash__(self):
return hash(self.Path)
@cached_property
def Key(self):
return self.Path.upper()
@property
def TimeStamp(self):
return os.stat(self.Path)[8]
def Validate(self, Type='', CaseSensitive=True):
def RealPath2(File, Dir='', OverrideDir=''):
NewFile = None
if OverrideDir:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile:
if OverrideDir[-1] == os.path.sep:
return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
else:
return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
if GlobalData.gAllFiles:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
if not NewFile:
NewFile = os.path.normpath(os.path.join(Dir, File))
if not os.path.exists(NewFile):
return None, None
if NewFile:
if Dir:
if Dir[-1] == os.path.sep:
return NewFile[len(Dir):], NewFile[0:len(Dir)]
else:
return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
else:
return NewFile, ''
return None, None
if GlobalData.gCaseInsensitive:
CaseSensitive = False
if Type and Type.lower() != self.Type:
return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
if not RealRoot and not RealFile:
RealFile = self.File
if self.AlterRoot:
RealFile = os.path.join(self.AlterRoot, self.File)
elif self.Root:
RealFile = os.path.join(self.Root, self.File)
if len (mws.getPkgPath()) == 0:
return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
else:
return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
ErrorCode = 0
ErrorInfo = ''
if RealRoot != self.Root or RealFile != self.File:
if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
ErrorCode = FILE_CASE_MISMATCH
ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
self.SubDir, self.Name = os.path.split(RealFile)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.SubDir:
self.Dir = os.path.join(RealRoot, self.SubDir)
else:
self.Dir = RealRoot
self.File = RealFile
self.Root = RealRoot
self.Path = os.path.join(RealRoot, RealFile)
return ErrorCode, ErrorInfo
## Parse PE image to get the required PE information.
#
class PeImageClass():
## Constructor
#
    # @param PeFile The file path of the PE image
#
def __init__(self, PeFile):
self.FileName = PeFile
self.IsValid = False
self.Size = 0
self.EntryPoint = 0
self.SectionAlignment = 0
self.SectionHeaderList = []
self.ErrorInfo = ''
try:
PeObject = open(PeFile, 'rb')
except:
            self.ErrorInfo = self.FileName + ' cannot be found\n'
return
# Read DOS header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x3E)
ByteList = ByteArray.tolist()
# DOS signature should be 'MZ'
if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
return
# Read 4 byte PE Signature
PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
PeObject.seek(PeOffset)
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 4)
# PE signature should be 'PE\0\0'
        if ByteArray.tobytes() != b'PE\0\0':
self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
return
# Read PE file header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x14)
ByteList = ByteArray.tolist()
SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
if SecNumber == 0:
self.ErrorInfo = self.FileName + ' has no section header'
return
# Read PE optional header
OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, OptionalHeaderSize)
ByteList = ByteArray.tolist()
self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
# Read each Section Header
for Index in range(SecNumber):
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x28)
ByteList = ByteArray.tolist()
SecName = self._ByteListToStr(ByteList[0:8])
SecVirtualSize = self._ByteListToInt(ByteList[8:12])
SecRawAddress = self._ByteListToInt(ByteList[20:24])
SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
self.IsValid = True
PeObject.close()
def _ByteListToStr(self, ByteList):
String = ''
for index in range(len(ByteList)):
if ByteList[index] == 0:
break
String += chr(ByteList[index])
return String
def _ByteListToInt(self, ByteList):
Value = 0
for index in range(len(ByteList) - 1, -1, -1):
Value = (Value << 8) | int(ByteList[index])
return Value
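# Illustrative usage sketch (hypothetical file name):
#
#   pe = PeImageClass('Shell.efi')
#   if pe.IsValid:
#       print(hex(pe.EntryPoint), pe.SectionAlignment, pe.Size)
#   else:
#       print(pe.ErrorInfo)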
class DefaultStore():
def __init__(self, DefaultStores ):
self.DefaultStores = DefaultStores
def DefaultStoreID(self, DefaultStoreName):
for key, value in self.DefaultStores.items():
if value == DefaultStoreName:
return key
return None
def GetDefaultDefault(self):
if not self.DefaultStores or "0" in self.DefaultStores:
return "0", TAB_DEFAULT_STORES_DEFAULT
else:
minvalue = min(int(value_str) for value_str in self.DefaultStores)
return (str(minvalue), self.DefaultStores[str(minvalue)])
def GetMin(self, DefaultSIdList):
if not DefaultSIdList:
return TAB_DEFAULT_STORES_DEFAULT
storeidset = {storeid for storeid, storename in self.DefaultStores.values() if storename in DefaultSIdList}
if not storeidset:
return ""
minid = min(storeidset )
for sid, name in self.DefaultStores.values():
if sid == minid:
return name
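# Illustrative usage sketch (assuming a mapping of store id -> store name):
#
#   ds = DefaultStore({"0": "Standard", "1": "Manufacturing"})
#   ds.DefaultStoreID("Manufacturing")   # -> "1"
#   ds.GetDefaultDefault()               # -> ("0", TAB_DEFAULT_STORES_DEFAULT)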
class SkuClass():
DEFAULT = 0
SINGLE = 1
MULTIPLE =2
def __init__(self,SkuIdentifier='', SkuIds=None):
if SkuIds is None:
SkuIds = {}
for SkuName in SkuIds:
SkuId = SkuIds[SkuName][0]
skuid_num = int(SkuId, 16) if SkuId.upper().startswith("0X") else int(SkuId)
if skuid_num > 0xFFFFFFFFFFFFFFFF:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] value %s exceeds the max value of UINT64"
% (SkuName, SkuId))
self.AvailableSkuIds = OrderedDict()
self.SkuIdSet = []
self.SkuIdNumberSet = []
self.SkuData = SkuIds
self._SkuInherit = {}
self._SkuIdentifier = SkuIdentifier
if SkuIdentifier == '' or SkuIdentifier is None:
self.SkuIdSet = ['DEFAULT']
self.SkuIdNumberSet = ['0U']
elif SkuIdentifier == 'ALL':
self.SkuIdSet = list(SkuIds.keys())
self.SkuIdNumberSet = [num[0].strip() + 'U' for num in SkuIds.values()]
else:
r = SkuIdentifier.split('|')
self.SkuIdSet=[(r[k].strip()).upper() for k in range(len(r))]
k = None
try:
self.SkuIdNumberSet = [SkuIds[k][0].strip() + 'U' for k in self.SkuIdSet]
except Exception:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (k, " | ".join(SkuIds.keys())))
for each in self.SkuIdSet:
if each in SkuIds:
self.AvailableSkuIds[each] = SkuIds[each][0]
else:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (each, " | ".join(SkuIds.keys())))
if self.SkuUsageType != SkuClass.SINGLE:
self.AvailableSkuIds.update({'DEFAULT':0, 'COMMON':0})
if self.SkuIdSet:
GlobalData.gSkuids = (self.SkuIdSet)
if 'COMMON' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('COMMON')
if self.SkuUsageType == self.SINGLE:
if len(GlobalData.gSkuids) != 1:
if 'DEFAULT' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('DEFAULT')
if GlobalData.gSkuids:
GlobalData.gSkuids.sort()
def GetNextSkuId(self, skuname):
if not self._SkuInherit:
self._SkuInherit = {}
for item in self.SkuData.values():
self._SkuInherit[item[1]]=item[2] if item[2] else "DEFAULT"
return self._SkuInherit.get(skuname, "DEFAULT")
def GetSkuChain(self, sku):
if sku == "DEFAULT":
return ["DEFAULT"]
skulist = [sku]
nextsku = sku
while True:
nextsku = self.GetNextSkuId(nextsku)
skulist.append(nextsku)
if nextsku == "DEFAULT":
break
skulist.reverse()
return skulist
def SkuOverrideOrder(self):
skuorderset = []
for skuname in self.SkuIdSet:
skuorderset.append(self.GetSkuChain(skuname))
skuorder = []
for index in range(max(len(item) for item in skuorderset)):
for subset in skuorderset:
if index > len(subset)-1:
continue
if subset[index] in skuorder:
continue
skuorder.append(subset[index])
return skuorder
@property
def SkuUsageType(self):
if self._SkuIdentifier.upper() == "ALL":
return SkuClass.MULTIPLE
if len(self.SkuIdSet) == 1:
if self.SkuIdSet[0] == 'DEFAULT':
return SkuClass.DEFAULT
return SkuClass.SINGLE
if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet:
return SkuClass.SINGLE
return SkuClass.MULTIPLE
def DumpSkuIdArrary(self):
if self.SkuUsageType == SkuClass.SINGLE:
return "{0x0}"
ArrayStrList = []
for skuname in self.AvailableSkuIds:
if skuname == "COMMON":
continue
while skuname != "DEFAULT":
ArrayStrList.append(hex(int(self.AvailableSkuIds[skuname])))
skuname = self.GetNextSkuId(skuname)
ArrayStrList.append("0x0")
return "{{{myList}}}".format(myList=",".join(ArrayStrList))
@property
def AvailableSkuIdSet(self):
return self.AvailableSkuIds
@property
def SystemSkuId(self):
if self.SkuUsageType == SkuClass.SINGLE:
if len(self.SkuIdSet) == 1:
return self.SkuIdSet[0]
else:
return self.SkuIdSet[0] if self.SkuIdSet[0] != 'DEFAULT' else self.SkuIdSet[1]
else:
return 'DEFAULT'
## Get the integer value from string like "14U" or integer like 2
#
# @param Input The object that may be either an integer value or a string
#
# @retval Value The integer value that the input represents
#
def GetIntegerValue(Input):
if not isinstance(Input, str):
return Input
String = Input
if String.endswith("U"):
String = String[:-1]
if String.endswith("ULL"):
String = String[:-3]
if String.endswith("LL"):
String = String[:-2]
if String.startswith("0x") or String.startswith("0X"):
return int(String, 16)
elif String == '':
return 0
else:
return int(String)
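# Illustrative results:
#
#   GetIntegerValue('14U')    # -> 14
#   GetIntegerValue('0x10')   # -> 16
#   GetIntegerValue(2)        # -> 2 (non-strings pass through unchanged)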
#
# Pack a GUID (registry format) list into a buffer and return it
#
def PackGUID(Guid):
return pack(PACK_PATTERN_GUID,
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16)
)
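# Illustrative usage sketch: the argument is a registry-format GUID already
# split into its five groups (PACK_PATTERN_GUID is defined elsewhere in this module):
#
#   PackGUID('12345678-9abc-def0-1234-56789abcdef0'.split('-'))
#   # -> the 16-byte little-endian EFI_GUID representation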
#
# Pack a GUID (byte) list into a buffer and return it
#
def PackByteFormatGUID(Guid):
return pack(PACK_PATTERN_GUID,
Guid[0],
Guid[1],
Guid[2],
Guid[3],
Guid[4],
Guid[5],
Guid[6],
Guid[7],
Guid[8],
Guid[9],
Guid[10],
)
## Deep-copy a dict/OrderedDict recursively
#
# @param ori_dict a nested dict or OrderedDict
#
# @retval a new dict or OrderedDict
#
def CopyDict(ori_dict):
dict_type = ori_dict.__class__
if dict_type not in (dict,OrderedDict):
return ori_dict
new_dict = dict_type()
for key in ori_dict:
if isinstance(ori_dict[key],(dict,OrderedDict)):
new_dict[key] = CopyDict(ori_dict[key])
else:
new_dict[key] = ori_dict[key]
return new_dict
#
# Remove the c/c++ comments: // and /* */
#
def RemoveCComments(ctext):
    return re.sub(r'//.*?\n|/\*.*?\*/', '\n', ctext, flags=re.S)
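# Illustrative result: both comment styles are replaced with a newline.
#
#   RemoveCComments('int a; // note\n/* block */ int b;')
#   # -> 'int a; \n\n int b;'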
|
test_httpchunked.py | from http import HTTPStatus
from http.client import HTTPConnection
from http.server import BaseHTTPRequestHandler, HTTPServer
from io import BytesIO
from threading import Thread
from unittest import TestCase
from httpchunked import decode, encode
class TestHTTPChunked(TestCase):
_connection: HTTPConnection
_server: HTTPServer
_thread: Thread
@classmethod
def setUpClass(cls) -> None:
server_address = "", 0
cls._server = HTTPServer(server_address, _Handler)
cls._thread = Thread(target=cls._server.serve_forever)
cls._thread.start()
cls._connection = HTTPConnection("localhost", cls._server.server_port)
def test_decode(self):
body = [b"foo", b"bar", b"baz"]
self._connection.request("POST", "/decode", body=body)
response = self._connection.getresponse()
expected = b"".join(body)
actual = response.read()
self.assertEqual(expected, actual)
def test_encode(self):
body = b"foo"
headers = {"Accept-Encoding": "chunked"}
self._connection.request("POST", "/encode", body=body, headers=headers)
response = self._connection.getresponse()
expected = body
actual = response.read()
self.assertEqual(expected, actual)
@classmethod
def tearDownClass(cls) -> None:
cls._connection.close()
cls._server.shutdown()
cls._server.server_close()
cls._thread.join()
class _Handler(BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
def do_POST(self):
if self.path == "/decode":
self.send_response(HTTPStatus.OK)
buf = BytesIO()
decode(buf, self.rfile)
raw = buf.getvalue()
content_length = len(raw)
self.send_header("Content-Length", content_length)
self.end_headers()
self.wfile.write(raw)
elif self.path == "/encode":
self.send_response(HTTPStatus.OK)
self.send_header("Transfer-Encoding", "chunked")
self.end_headers()
content_length_str = self.headers["Content-Length"]
content_length = int(content_length_str)
raw = self.rfile.read(content_length)
buf = BytesIO(raw)
encode(self.wfile, buf)
else:
self.send_error(HTTPStatus.NOT_FOUND)
def log_message(self, format_, *args):
pass
|
lock_example.py | import threading
worker_lock = threading.Lock()
def do_some_work():
with worker_lock:
print('I have lock when I am writing it!')
workers = []
for _ in range(10):
thread = threading.Thread(target=do_some_work)
workers.append(thread)
thread.start()
for worker in workers:
worker.join()
print('All work is done - bye bye!')
|
web_server.py | import logging
from http.server import BaseHTTPRequestHandler
from pathlib import Path
import threading
def html_encode_file(name, directory: str = 'html', replace_dict: dict = None):
html = Path(Path(__file__).parent.parent.resolve(), directory, name).read_text()
if replace_dict:
html_str = str(html)
for key, val in replace_dict.items():
html_str = html_str.replace(f'||{key}||', val)
return html_str.encode("utf8")
else:
return html.encode("utf8")
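# Illustrative usage sketch (hypothetical file): tokens written as ||key|| in
# the HTML file are substituted before the encoded bytes are returned.
#
#   html_encode_file('index.html', replace_dict={'title': 'Status Page'})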
class WebRequestHandler(BaseHTTPRequestHandler):
def not_found(self):
self.wfile.write('Nothing to See Here'.encode("utf8"))
@property
def path_list(self):
temp_path_list = self.path.split('/')
path_list = list()
for path in temp_path_list:
if len(path) > 0:
path_list.append(path)
return path_list
    def is_in_path_list(self, path_list: list):
        if len(path_list) >= 1:
            if path_list[0] is None:
                del path_list[0]
            if len(path_list) <= len(self.path_list):
                # every given component must match its counterpart in the request path
                for i in range(len(path_list)):
                    if path_list[i] != self.path_list[i]:
                        return False
                return True
            else:
                return False
        return False
def served_css_js(self):
if 1 < len(self.path.split('.')) < 3:
if self.path[1:].split('.')[1] in ('js', 'css', 'html'):
if len(self.path.split('/')) > 2:
file_name = self.path.split('/')[2]
else:
file_name = self.path[1:]
self.wfile.write(html_encode_file(file_name))
return True
return False
def _set_headers(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
pass
def do_HEAD(self):
self._set_headers()
def do_POST(self):
self._set_headers()
self.not_found()
class WebServer:
def __init__(self, address='localhost', port=8000, key=None, html_replace_dict=None):
self.api_dict = dict()
self.post_dict = dict()
self.address = address
self.port = int(port)
self.key = key
self.html_replace_dict = html_replace_dict
self.thread = threading.Thread(target=self.httpd_serve)
self.thread.daemon = True
def httpd_serve(self):
pass
def restart(self):
pass
def start(self):
self.thread.start()
href_link = f'http://{self.address}:{self.port}'
if self.key:
href_link += f'/{self.key}/'
print(f'Starting httpd server at {href_link}')
if self.key is None:
            print('We do not recommend running servers without keys!')
def stop(self):
pass
def update_api_dict(self, update_dict: dict):
for key, val in update_dict.items():
self.api_dict[key] = val
|
sunny.py | #! /usr/bin/python
# -*- coding: UTF-8 -*-
import getopt
import socket
import ssl
import json
import struct
import random
import sys
import time
import logging
import threading
python_version = sys.version_info >= (3, 0)
if not python_version:
reload(sys)
sys.setdefaultencoding('utf8')
options = {
'clientid':'',
}
def usage():
print(
' -h help \n' \
        ' -c clientid xxxxxxxxxxxxxxxx\n' \
)
sys.exit()
try:
opts, args = getopt.getopt(sys.argv[1:], "h:c:", ['help', "clientid="])
except getopt.GetoptError:
usage()
if len(opts) == 0:
print(
'usage\n' \
'python sunny.py --clientid=xxxxxxxx\n' \
'python sunny.py --clientid=xxxxxxxx,xxxxxxxx\n' \
)
for option, value in opts:
if option in ['-h', '--help']:
usage()
if option in ['-c', '--clientid']:
options['clientid'] = value
if options['clientid'] == '':
if not python_version:
input_clientid = raw_input('input clientid:')
else:
input_clientid = str(input('input clientid:'))
if input_clientid != '':
options['clientid'] = input_clientid
else:
sys.exit()
Tunnels = list()
reqIdaddr = dict()
localaddr = dict()
# ngrok.cc
def ngrok_adds(Tunnel):
global Tunnels
for tunnelinfo in Tunnel:
if tunnelinfo.get('proto'):
if tunnelinfo.get('proto').get('http'):
protocol = 'http'
if tunnelinfo.get('proto').get('https'):
protocol = 'https'
if tunnelinfo.get('proto').get('tcp'):
protocol = 'tcp'
proto = tunnelinfo['proto'][protocol].split(':') # 127.0.0.1:80
if proto[0] == '':
proto[0] = '127.0.0.1'
if proto[1] == '' or proto[1] == 0:
proto[1] = 80
body = dict()
body['protocol'] = protocol
body['hostname'] = tunnelinfo['hostname']
body['subdomain'] = tunnelinfo['subdomain']
body['httpauth'] = tunnelinfo['httpauth']
body['rport'] = tunnelinfo['remoteport']
body['lhost'] = str(proto[0])
body['lport'] = int(proto[1])
Tunnels.append(body)
# ngrok.cc
def ngrok_auth(options):
host = 'www.ngrok.cc'
port = 443
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_client = ssl.wrap_socket(client, ssl_version=ssl.PROTOCOL_TLSv1) # ssl.PROTOCOL_TLSv1_2
ssl_client.connect((host, port))
except Exception:
print('connected auth server failed: https://www.ngrok.cc')
time.sleep(10)
sys.exit()
header = "POST " + "/api/clientid/clientid/%s" + " HTTP/1.1" + "\r\n"
header += "Content-Type: text/html" + "\r\n"
header += "Host: %s" + "\r\n"
header += "\r\n"
buf = header % (options, host)
ssl_client.sendall(buf.encode('utf-8'))
fd = ssl_client.makefile('rb', 0)
body = bytes()
while True:
line = fd.readline().decode('utf-8')
if line == "\n" or line == "\r\n":
chunk_size = int(fd.readline(), 16)
if chunk_size > 0:
body = fd.read(chunk_size).decode('utf-8')
break
ssl_client.close()
authData = json.loads(body)
if authData['status'] != 200:
print('auth failed:%s, ErrorCode:%s' % (authData['msg'], authData['status']))
time.sleep(10)
sys.exit()
print('auth success, connecting server...')
ngrok_adds(authData['data'])
proto = authData['server'].split(':')
return proto
print('welcome to python-ngrok v1.42\r\nCtrl+C to quit')
serverArr = ngrok_auth(options['clientid'])
host = str(serverArr[0]) # Ngrok server address
port = int(serverArr[1]) # port
bufsize = 1024
mainsocket = 0
ClientId = ''
pingtime = 0
def getloacladdr(Tunnels, Url):
protocol = Url[0:Url.find(':')]
hostname = Url[Url.find('//') + 2:]
subdomain = hostname[0:hostname.find('.')]
rport = Url[Url.rfind(':') + 1:]
for tunnelinfo in Tunnels:
if tunnelinfo.get('protocol') == protocol:
if tunnelinfo.get('protocol') in ['http', 'https']:
if tunnelinfo.get('hostname') == hostname:
return tunnelinfo
if tunnelinfo.get('subdomain') == subdomain:
return tunnelinfo
if tunnelinfo.get('protocol') == 'tcp':
if tunnelinfo.get('rport') == int(rport):
return tunnelinfo
return dict()
def dnsopen(host):
try:
ip = socket.gethostbyname(host)
except socket.error:
return False
return ip
def connectremote(host, port):
try:
host = socket.gethostbyname(host)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_client = ssl.wrap_socket(client, ssl_version=ssl.PROTOCOL_SSLv23)
ssl_client.connect((host, port))
ssl_client.setblocking(1)
logger = logging.getLogger('%s:%d' % ('Conn', ssl_client.fileno()))
logger.debug('New connection to: %s:%d' % (host, port))
except socket.error:
return False
return ssl_client
def connectlocal(localhost, localport):
try:
localhost = socket.gethostbyname(localhost)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((localhost, localport))
client.setblocking(1)
logger = logging.getLogger('%s:%d' % ('Conn', client.fileno()))
logger.debug('New connection to: %s:%d' % (localhost, localport))
except socket.error:
return False
return client
def NgrokAuth():
Payload = dict()
Payload['ClientId'] = ''
Payload['OS'] = 'darwin'
Payload['Arch'] = 'amd64'
Payload['Version'] = '2'
Payload['MmVersion'] = '2.1'
Payload['User'] = 'user'
Payload['Password'] = ''
body = dict()
body['Type'] = 'Auth'
body['Payload'] = Payload
buffer = json.dumps(body)
return(buffer)
def ReqTunnel(ReqId, Protocol, Hostname, Subdomain, HttpAuth, RemotePort):
Payload = dict()
Payload['ReqId'] = ReqId
Payload['Protocol'] = Protocol
Payload['Hostname'] = Hostname
Payload['Subdomain'] = Subdomain
Payload['HttpAuth'] = HttpAuth
Payload['RemotePort'] = RemotePort
body = dict()
body['Type'] = 'ReqTunnel'
body['Payload'] = Payload
buffer = json.dumps(body)
return(buffer)
def RegProxy(ClientId):
Payload = dict()
Payload['ClientId'] = ClientId
body = dict()
body['Type'] = 'RegProxy'
body['Payload'] = Payload
buffer = json.dumps(body)
return(buffer)
def Ping():
Payload = dict()
body = dict()
body['Type'] = 'Ping'
body['Payload'] = Payload
buffer = json.dumps(body)
return(buffer)
def lentobyte(len):
return struct.pack('<LL', len, 0)
def sendbuf(sock, buf, isblock = False):
if isblock:
sock.setblocking(1)
sock.sendall(buf)
if isblock:
sock.setblocking(0)
def sendpack(sock, msg, isblock = False):
if isblock:
sock.setblocking(1)
sock.sendall(lentobyte(len(msg)) + msg.encode('utf-8'))
logger = logging.getLogger('%s:%d' % ('Send', sock.fileno()))
logger.debug('Writing message: %s' % msg)
if isblock:
sock.setblocking(0)
def tolen(v):
if len(v) == 8:
return struct.unpack('<II', v)[0]
return 0
def getRandChar(length):
_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz"
return ''.join(random.sample(_chars, length))
def HKClient(sock, linkstate, type, tosock = None):
global mainsocket
global ClientId
global pingtime
global reqIdaddr
global localaddr
recvbuf = bytes()
while True:
try:
if linkstate == 0:
if type == 1:
sendpack(sock, NgrokAuth(), False)
linkstate = 1
if type == 2:
sendpack(sock, RegProxy(ClientId), False)
linkstate = 1
if type == 3:
linkstate = 1
recvbut = sock.recv(bufsize)
if not recvbut: break
if len(recvbut) > 0:
if not recvbuf:
recvbuf = recvbut
else:
recvbuf += recvbut
if type == 1 or (type == 2 and linkstate == 1):
lenbyte = tolen(recvbuf[0:8])
if len(recvbuf) >= (8 + lenbyte):
buf = recvbuf[8:lenbyte + 8].decode('utf-8')
logger = logging.getLogger('%s:%d' % ('Recv', sock.fileno()))
logger.debug('Reading message with length: %d' % len(buf))
logger.debug('Read message: %s' % buf)
js = json.loads(buf)
if type == 1:
if js['Type'] == 'ReqProxy':
newsock = connectremote(host, port)
if newsock:
thread = threading.Thread(target = HKClient, args = (newsock, 0, 2))
thread.setDaemon(True)
thread.start()
if js['Type'] == 'AuthResp':
ClientId = js['Payload']['ClientId']
logger = logging.getLogger('%s' % 'client')
logger.debug('Authenticated with server, client id: %s' % ClientId)
sendpack(sock, Ping())
pingtime = time.time()
for info in Tunnels:
reqid = getRandChar(8)
sendpack(sock, ReqTunnel(reqid, info['protocol'], info['hostname'], info['subdomain'], info['httpauth'], info['rport']))
reqIdaddr[reqid] = (info['lhost'], info['lport'])
if js['Type'] == 'NewTunnel':
if js['Payload']['Error'] != '':
logger = logging.getLogger('%s' % 'client')
logger.error('Server failed to allocate tunnel: %s' % js['Payload']['Error'])
print('failed to create tunnel: %s' % js['Payload']['Error'])
time.sleep(30)
else:
logger = logging.getLogger('%s' % 'client')
logger.debug('Tunnel established at %s' % js['Payload']['Url'])
                            print('successfully created tunnel: %s' % js['Payload']['Url'])
localaddr[js['Payload']['Url']] = reqIdaddr[js['Payload']['ReqId']]
if type == 2:
if js['Type'] == 'StartProxy':
localhost, localport = localaddr[js['Payload']['Url']]
newsock = connectlocal(localhost, localport)
if newsock:
thread = threading.Thread(target = HKClient, args = (newsock, 0, 3, sock))
thread.setDaemon(True)
thread.start()
tosock = newsock
linkstate = 2
else:
                                body = '<!DOCTYPE html><html><head><meta charset="utf-8"><title>Web Service Error</title><meta name="viewport" content="initial-scale=1,maximum-scale=1,user-scalable=no"><meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"><style>html,body{height:100%%}body{margin:0;padding:0;width:100%%;display:table;font-weight:100;font-family:"Microsoft YaHei",Arial,Helvetica,sans-serif}.container{text-align:center;display:table-cell;vertical-align:middle}.content{border:1px solid #ebccd1;text-align:center;display:inline-block;background-color:#f2dede;color:#a94442;padding:30px}.title{font-size:18px}.copyright{margin-top:30px;text-align:right;color:#000}</style></head><body><div class="container"><div class="content"><div class="title">tunnel %s invalid<br>cannot connect to <strong>%s</strong>. the port did not provide a web service yet</div></div></div></body></html>'
html = body % (js['Payload']['Url'], localhost + ':' + str(localport))
header = "HTTP/1.0 502 Bad Gateway" + "\r\n"
header += "Content-Type: text/html" + "\r\n"
header += "Content-Length: %d" + "\r\n"
header += "\r\n" + "%s"
buf = header % (len(html.encode('utf-8')), html)
sendbuf(sock, buf.encode('utf-8'))
if len(recvbuf) == (8 + lenbyte):
recvbuf = bytes()
else:
recvbuf = recvbuf[8 + lenbyte:]
if type == 3 or (type == 2 and linkstate == 2):
sendbuf(tosock, recvbuf)
recvbuf = bytes()
except socket.error:
break
if type == 1:
mainsocket = False
if type == 3:
try:
tosock.shutdown(socket.SHUT_WR)
except socket.error:
tosock.close()
logger = logging.getLogger('%s:%d' % ('Close', sock.fileno()))
logger.debug('Closing')
sock.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s', datefmt='%Y/%m/%d %H:%M:%S')
logger = logging.getLogger('%s' % 'client')
logger.debug('python-ngrok v1.5')
while True:
try:
if mainsocket == False:
ip = dnsopen(host)
if ip == False:
logger = logging.getLogger('%s' % 'client')
logger.debug('update dns')
print('failed to connect to ngrok server')
time.sleep(10)
continue
mainsocket = connectremote(ip, port)
if mainsocket == False:
logger = logging.getLogger('%s' % 'client')
logger.debug('connect failed...!')
print('failed to connect to ngrok server')
time.sleep(10)
continue
thread = threading.Thread(target = HKClient, args = (mainsocket, 0, 1))
thread.setDaemon(True)
thread.start()
if pingtime + 20 < time.time() and pingtime != 0:
sendpack(mainsocket, Ping())
pingtime = time.time()
time.sleep(1)
except socket.error:
pingtime = 0
except KeyboardInterrupt:
sys.exit() |
test_BaseClient_live.py |
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from client import JobGetter
from django.test import override_settings
from mock import patch
import os, subprocess
import threading
import time
from ci import views
from ci.tests import utils as test_utils
from client.tests import LiveClientTester, utils
@override_settings(INSTALLED_GITSERVERS=[test_utils.github_config()])
class Tests(LiveClientTester.LiveClientTester):
def create_client_and_job(self, recipe_dir, name, sleep=1):
c = utils.create_base_client()
c.set_environment('BUILD_ROOT', '/foo/bar')
c.client_info["single_shot"] = True
c.client_info["update_step_time"] = 1
c.client_info["ssl_cert"] = False # not needed but will get another line of coverage
c.client_info["server"] = self.live_server_url
c.client_info["servers"] = [self.live_server_url]
job = utils.create_client_job(recipe_dir, name=name, sleep=sleep)
if job.config.name not in c.get_client_info("build_configs"):
c.add_config(job.config.name)
c.client_info["build_key"] = job.recipe.build_user.build_key
return c, job
def test_no_signals(self):
with test_utils.RecipeDir() as recipe_dir:
# This is just for coverage. We can't really
# test this because if we send a signal it will just quit
import signal
old_signal = signal.SIGUSR2
del signal.SIGUSR2
c, job = self.create_client_and_job(recipe_dir, "No signal", sleep=2)
signal.SIGUSR2 = old_signal
def test_run_success(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "RunSuccess", sleep=2)
self.set_counts()
c.run()
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=1, active_branches=1)
utils.check_complete_job(self, job, c)
def test_run_graceful(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "Graceful", sleep=2)
self.set_counts()
c.client_info["single_shot"] = False
c.client_info["poll"] = 1
# graceful signal, should complete
script = "sleep 3 && kill -USR2 %s" % os.getpid()
proc = subprocess.Popen(script, shell=True, executable="/bin/bash", stdout=subprocess.PIPE)
c.run()
proc.wait()
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=1, active_branches=1)
utils.check_complete_job(self, job, c)
self.assertEqual(c.graceful_signal.triggered, True)
self.assertEqual(c.cancel_signal.triggered, False)
def test_run_cancel(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "Cancel", sleep=4)
self.set_counts()
c.client_info["single_shot"] = False
c.client_info["poll"] = 1
# cancel signal, should stop
script = "sleep 3 && kill -USR1 %s" % os.getpid()
proc = subprocess.Popen(script, shell=True, executable="/bin/bash", stdout=subprocess.PIPE)
c.run()
proc.wait()
self.compare_counts(canceled=1,
num_clients=1,
num_events_completed=1,
num_jobs_completed=1,
active_branches=1,
events_canceled=1,
)
self.assertEqual(c.cancel_signal.triggered, True)
self.assertEqual(c.graceful_signal.triggered, False)
utils.check_canceled_job(self, job, c)
def test_run_job_cancel(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobCancel", sleep=4)
# cancel response, should cancel the job
self.set_counts()
thread = threading.Thread(target=c.run)
thread.start()
time.sleep(4)
job.refresh_from_db()
views.set_job_canceled(job)
thread.join()
self.compare_counts(canceled=1,
num_clients=1,
num_events_completed=1,
num_jobs_completed=1,
active_branches=1,
events_canceled=1,
)
self.assertEqual(c.cancel_signal.triggered, False)
self.assertEqual(c.graceful_signal.triggered, False)
utils.check_canceled_job(self, job, c)
def test_run_job_invalidated_basic(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobInvalidated", sleep=40)
# stop response, should stop the job
self.set_counts()
thread = threading.Thread(target=c.run)
thread.start()
start_time = time.time()
time.sleep(4)
job.refresh_from_db()
job.set_invalidated("Test invalidation", check_ready=True)
thread.join()
end_time = time.time()
self.assertGreater(15, end_time-start_time)
self.compare_counts(invalidated=1, num_clients=1, num_changelog=1)
utils.check_stopped_job(self, job)
def test_run_job_invalidated_nested_bash(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobInvalidated", sleep=40)
job.delete()
job = utils.create_job_with_nested_bash(recipe_dir, name="JobWithNestedBash", sleep=40)
# stop response, should stop the job
self.set_counts()
thread = threading.Thread(target=c.run)
start_time = time.time()
thread.start()
time.sleep(4)
job.refresh_from_db()
job.set_invalidated("Test invalidation", check_ready=True)
thread.join()
end_time = time.time()
self.assertGreater(15, end_time-start_time)
self.compare_counts(num_clients=1, invalidated=1, num_changelog=1)
utils.check_stopped_job(self, job)
@patch.object(JobGetter.JobGetter, 'find_job')
def test_exception(self, mock_getter):
with test_utils.RecipeDir() as recipe_dir:
# check exception handler
mock_getter.side_effect = Exception("oh no!")
c, job = self.create_client_and_job(recipe_dir, "JobStop", sleep=4)
self.set_counts()
c.run()
self.compare_counts()
@patch.object(JobGetter.JobGetter, 'find_job')
def test_runner_error(self, mock_getter):
with test_utils.RecipeDir() as recipe_dir:
mock_getter.return_value = None
c, job = self.create_client_and_job(recipe_dir, "JobError")
self.set_counts()
c.runner_error = True
c.run()
self.compare_counts()
def test_bad_thread_join(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "BadThreadJoin", sleep=2)
c.thread_join_wait = 0
self.set_counts()
c.run()
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=1, active_branches=1)
utils.check_complete_job(self, job, c)
|
test2.py | #!/usr/bin/env python
from __future__ import division, print_function
import time
import random
from subprocess import call
from multiprocessing import Process
try:
from linuxLED import LED
except ImportError:
# fall back to the turtle-graphics simulator when the hardware LED driver is unavailable
from linuxLED import LEDTurtle as LED
def say(msg):
call(msg, shell=True)
class TTS(object):
"""
Apple - say
"""
def __init__(self, tts=None):
"""
You can pass in a specific tts program or let it search for one to
use. It searches for: espeak, spd-say, say. If no program is found, it uses
echo to print text to the command line
"""
if tts:
self.tts = tts
else:
found = False
for cmd in ['espeak', 'spd-say', 'say']:
# pass a single string with shell=True; redirect output instead of relying on
# the non-portable "which -s" flag (BSD/macOS only)
if call('which {} > /dev/null 2>&1'.format(cmd), shell=True) == 0:
self.tts = [cmd]
# self.tts = cmd
found = True
break
if not found:
print('could not find a tts program, using echo instead')
self.tts = ['echo']
def setOptions(self, options):
"""
Change the default options
"""
self.tts.append(options)
def say(self, txt):
"""
Speak the text passed to this function. If no tts was found, then this
will print the text instead.
"""
c = self.tts[:] # make a copy
c.append(txt)
c = ' '.join(c)
# call(' '.join(c), shell=True)
p = Process(target=say, args=(c,))
p.start()
# p.join()
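# Usage sketch (hypothetical options; assumes one of espeak/spd-say/say is installed):
# tts = TTS()
# tts.setOptions('-v en')   # e.g. an espeak voice flag
# tts.say('hello world')    # speaks asynchronously in a child process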
class Eyes(object):
eyes = {
'normal': [[0,0,2,0,2,0,0]], # . .
'left': [[0,2,0,2,0,0,0]], # . .
'right': [[0,0,0,2,0,2,0]], # . .
'mad': [ # \ /
[0,2,0,0,0,2,0],
[0,0,2,0,2,0,0]
],
'sad': [ # / \
[0,0,2,0,2,0,0],
[0,2,0,0,0,2,0]
],
'happy': [ # > <
[0,2,0,0,0,2,0],
[0,0,2,0,2,0,0],
[0,2,0,0,0,2,0]
],
'dead': [ # x x
[2,0,2,0,2,0,2],
[0,2,0,0,0,2,0],
[2,0,2,0,2,0,2]
],
'surprise': [ # o o
[0,2,2,0,2,2,0],
[0,2,2,0,2,2,0]
]
}
def __init__(self):
# self.led = leds
# self.dims = dims
pass
def look(self, loc):
if loc not in self.eyes:
raise Exception('Invalid eyes value:', loc)
return self.eyes[loc]
class Mouth(object):
letters = {
'a': [ # 7x6
[0,2,2,2,2,2,0],
[0,2,2,2,2,2,0],
[0,2,2,2,2,2,0], # set value to brightness?
[0,2,1,1,1,2,0],
[0,0,2,2,2,0,0]
],
'e': [ # 7x5
[0,2,2,2,2,2,0],
[0,2,2,2,2,2,0],
[0,2,1,1,1,2,0], # set value to brightness?
[0,0,2,2,2,0,0]
],
'o': [ # 7x4
[0,0,0,2,2,0,0],
[0,0,2,2,2,2,0],
[0,0,2,2,2,2,0],
[0,0,0,2,2,0,0],
],
'uq': [ # 7x2
[0,0,2,2,0,0,0],
[0,0,2,2,0,0,0],
],
'wr': [
[0,0,0,2,2,0,0],
[0,0,2,2,2,2,0],
[0,0,2,1,1,2,0],
[0,0,0,2,2,0,0],
],
'ts': [ # 7x3
[2,2,2,2,2,2,2],
[0,2,0,0,0,2,0],
[0,0,2,2,2,0,0]
],
'ln': [ # 7x5
[2,0,0,0,0,0,2],
[0,2,2,2,2,2,0],
[0,2,2,1,1,2,0],
[0,2,1,1,2,2,0], # set value to brightness?
[0,0,2,2,2,0,0]
],
'mbp': [ # 7x1
[0,2,2,2,2,2,0]
],
'fv': [
[0,2,2,2,2,2,0],
[0,0,2,0,2,0,0]
],
'nothing': [ # 7x1
[0,2,2,2,2,2,0]
],
'other': [ # 7x5
[0,0,0,0,0,0,0],
[0,0,2,2,2,0,0],
[0,2,2,2,2,2,0], # set value to brightness?
[0,0,2,2,2,0,0]
],
}
def __init__(self):
pass
def letter(self, ltr):
ltr = ltr.lower()
if ltr == 'a': ltr = self.letters['a']
elif ltr == 'o': ltr = self.letters['o']
elif ltr == 'e': ltr = self.letters['e']
elif ltr == 'w' or ltr == 'r': ltr = self.letters['wr']
elif ltr == 't' or ltr == 's': ltr = self.letters['ts']
elif ltr == 'l' or ltr == 'n': ltr = self.letters['ln']
elif ltr == 'u' or ltr == 'q': ltr = self.letters['uq']
elif ltr == 'm' or ltr == 'b' or ltr == 'p': ltr = self.letters['mbp']
elif ltr == 'f' or ltr == 'v': ltr = self.letters['fv']
elif 97 <= ord(ltr) <= 122: ltr = self.letters['other']
else: ltr = self.letters['nothing']
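# pad the mouth pattern with blank rows so every frame is a full 7x7 grid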
m = ltr + [[0]*7 for i in range(7-len(ltr))]
# self.led.draw(ltr, row_offset=10)
return m
class LEDFace(object):
# value?
look_size = {
'forward': 4,
'up': 0,
'down': 9
}
nose = [[0]*7]
def __init__(self, drawClass):
# self.led = drawClass.__init__(self)
self.led = drawClass()
self.eyeLim = 10 # eye goes 0-9, offset to start mouth
self.mouth = Mouth()
self.eye = Eyes()
self.look('forward')
self.tts = TTS()
# print('forehead', self.forehead)
# print('nose', self.nose)
def look(self, where):
self.forehead = [[0]*7 for i in range(self.look_size[where])]
def __del__(self):
pass
def talk(self, msg):
self.look('forward')
eye_type = 'normal'
self.tts.say(msg)
for ltr in msg:
if ltr == ' ':
where = random.choice(list(self.look_size.keys()))  # list() needed under Python 3
print(where)
self.look(where)
eye_type = random.choice(list(self.eye.eyes.keys()))
print('eye_type:', eye_type)
print(ltr)
m = self.mouth.letter(ltr)
e = self.eye.look(eye_type)
face = self.forehead+e+self.nose+m
zeros = [[0]*7 for i in range(17-len(face))]
self.led.draw(face + zeros)
# time.sleep(.001)
def draw(self, pic):
self.led.draw(pic)
led = LEDFace(LED)
# led = LEDFace(LEDTurtle)
led.talk('hi how are you! This is a test')
time.sleep(1)
|
parallel_percrank_train.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Parallel training for Perceptron ranker (using Qsub & RPyC).
When run as main, this file will start a worker and register with the address given
in command-line parameters.
Usage: ./parallel_percrank_train.py <head-address> <head-port>
@todo: Local training on multiple cores should not be hard to implement.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from collections import deque, namedtuple
import sys
from threading import Thread
import socket
import pickle as pickle
import time
import datetime
import os
import tempfile
import numpy as np
from rpyc import Service, connect, async_
from rpyc.utils.server import ThreadPoolServer
from tgen.futil import file_stream
from .logf import log_info, set_debug_stream, log_debug
from tgen.logf import log_warn, is_debug_stream
from tgen.rnd import rnd
from tgen.rank import Ranker, PerceptronRanker
from tgen.cluster import Job
class ServiceConn(namedtuple('ServiceConn', ['host', 'port', 'conn'])):
"""This stores a connection along with its address."""
pass
def dump_ranker(ranker, work_dir):
fh = tempfile.NamedTemporaryFile(suffix='.pickle', prefix='rdump-', dir=work_dir, delete=False)
# we are storing training features separately in NumPy format since including them in
# the pickle may lead to a crash for very large feature matrices
# (see https://github.com/numpy/numpy/issues/2396).
train_feats = ranker.train_feats
ranker.train_feats = None
pickle.dump(ranker, fh, protocol=pickle.HIGHEST_PROTOCOL)
np.save(fh, train_feats)
fh.close()
return fh.name
def load_ranker(dump):
with open(dump, 'rb') as fh:
ranker = pickle.load(fh)
train_feats = np.load(fh)
ranker.train_feats = train_feats
return ranker
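# Round-trip sketch (hypothetical path): dump_ranker/load_ranker are inverses:
# path = dump_ranker(ranker, '/tmp')
# ranker = load_ranker(path)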
def get_worker_registrar_for(head):
"""Return a class that will handle worker registration for the given head."""
# create a dump of the head to be passed to workers
log_info('Saving ranker init state...')
tstart = time.time()
ranker_dump_path = dump_ranker(head.loc_ranker, head.work_dir)
log_info('Ranker init state saved in %s, it took %f secs.' % (ranker_dump_path,
time.time() - tstart))
class WorkerRegistrarService(Service):
def exposed_register_worker(self, host, port):
"""Register a worker with my head, initialize it."""
# initiate connection in the other direction
log_info('Worker %s:%d connected, initializing training.' % (host, port))
conn = connect(host, port, config={'allow_pickle': True})
# initialize the remote server (with training data etc.)
init_func = async_(conn.root.init_training)
req = init_func(ranker_dump_path)
# add it to the list of running services
sc = ServiceConn(host, port, conn)
head.services.add(sc)
head.pending_requests.add((sc, None, req))
log_info('Worker %s:%d initialized.' % (host, port))
return WorkerRegistrarService, ranker_dump_path
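# Registration handshake, in short: each worker starts its own RPyC server and
# calls register_worker(host, port) on the head; the head then connects back to
# the worker and asynchronously kicks off init_training with the ranker dump.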
class ParallelRanker(Ranker):
"""This is used to train rankers in parallel (supports any ranker class)."""
DEFAULT_PORT = 25125
def __init__(self, cfg, work_dir, experiment_id=None, ranker_class=PerceptronRanker):
# initialize base class
super(ParallelRanker, self).__init__()
# initialize myself
self.work_dir = work_dir
self.jobs_number = cfg.get('jobs_number', 10)
self.data_portions = cfg.get('data_portions', self.jobs_number)
self.job_memory = cfg.get('job_memory', 4)
self.port = cfg.get('port', self.DEFAULT_PORT)
self.host = socket.getfqdn()
self.poll_interval = cfg.get('poll_interval', 1)
self.experiment_id = experiment_id if experiment_id is not None else ''
# this will be needed when running
self.server = None
self.server_thread = None
self.jobs = None
self.pending_requests = None
self.services = None
self.free_services = None
self.results = None
# create a local ranker instance that will be copied to all parallel workers
# and will be used to average weights after each iteration
self.loc_ranker = ranker_class(cfg)
def train(self, das_file, ttree_file, data_portion=1.0):
"""Run parallel perceptron training, start and manage workers."""
# initialize the ranker instance
log_info('Initializing...')
self.loc_ranker._init_training(das_file, ttree_file, data_portion)
# run server to process registering clients
self._init_server()
# spawn training jobs
log_info('Spawning jobs...')
host_short, _ = self.host.split('.', 1) # short host name for job names
for j in range(self.jobs_number):
# set up debugging logfile only if we have it on the head
debug_logfile = ('"PRT%02d.debug-out.txt.gz"' % j) if is_debug_stream() else 'None'
job = Job(header='from tgen.parallel_percrank_train import run_worker',
code=('run_worker("%s", %d, %s)' %
(self.host, self.port, debug_logfile)),
name=self.experiment_id + ("PRT%02d-%s-%d" % (j, host_short, self.port)),
work_dir=self.work_dir)
job.submit(self.job_memory)
self.jobs.append(job)
# run the training passes
try:
for iter_no in range(1, self.loc_ranker.passes + 1):
log_info('Pass %d...' % iter_no)
log_debug('\n***\nTR%05d:' % iter_no)
iter_start_time = time.time()
cur_portion = 0
results = [None] * self.data_portions
w_dump = pickle.dumps(self.loc_ranker.get_weights(), protocol=pickle.HIGHEST_PROTOCOL)
rnd_seeds = [rnd.random() for _ in range(self.data_portions)]
# wait for free services / assign computation
while cur_portion < self.data_portions or self.pending_requests:
log_debug('Starting loop over services.')
# check if some of the pending computations have finished
for sc, req_portion, req in list(self.pending_requests):
res = self._check_pending_request(iter_no, sc, req_portion, req)
if res:
results[req_portion] = res
# check for free services and assign new computation
while cur_portion < self.data_portions and self.free_services:
log_debug('Assigning request %d' % cur_portion)
sc = self.free_services.popleft()
log_info('Assigning request %d / %d to %s:%d' %
(iter_no, cur_portion, sc.host, sc.port))
train_func = async_(sc.conn.root.training_pass)
req = train_func(w_dump, iter_no, rnd_seeds[cur_portion],
* self._get_portion_bounds(cur_portion))
self.pending_requests.add((sc, cur_portion, req))
cur_portion += 1
log_debug('Assigned %d' % cur_portion)
# sleep for a while
log_debug('Sleeping.')
time.sleep(self.poll_interval)
# delete the temporary ranker dump when the 1st iteration is complete
if self.ranker_dump_path:
log_info('Removing temporary ranker dump at %s.' % self.ranker_dump_path)
os.remove(self.ranker_dump_path)
self.ranker_dump_path = None
# gather/average the diagnostic statistics
self.loc_ranker.set_diagnostics_average([d for _, d in results])
# take an average of weights; set it as new w
self.loc_ranker.set_weights_average([w for w, _ in results])
self.loc_ranker.store_iter_weights() # store a copy of w for averaged perceptron
# print statistics
log_debug(self.loc_ranker._feat_val_str(), '\n***')
self.loc_ranker._print_pass_stats(iter_no, datetime.timedelta(seconds=(time.time() - iter_start_time)))
# after all passes: average weights if set to do so
if self.loc_ranker.averaging is True:
self.loc_ranker.set_weights_iter_average()
# kill all jobs
finally:
for job in self.jobs:
job.delete()
def _check_pending_request(self, iter_no, sc, req_portion, req):
"""Check whether the given request has finished (i.e., job is loaded or job has
processed the given data portion.
If the request is finished, the worker that processed it is moved to the pool
of free services.
@param iter_no: current iteration number (for logging)
@param sc: a ServiceConn object that stores the worker connection parameters
@param req_portion: current data portion number (is None for jobs loading)
@param req: the request itself
@return: the value returned by the finished data processing request, or None \
(for loading requests or unfinished requests)
"""
result = None
if req_portion is not None:
log_debug('Checking %d' % req_portion)
# checking if the request has finished
if req.ready:
# loading requests -- do nothing (just logging)
if req_portion is None:
if req.error:
log_info('Error loading on %s:%d' % (sc.host, sc.port))
else:
log_info('Worker %s:%d finished loading.' % (sc.host, sc.port))
# data processing request -- retrieve the value
else:
log_debug('Ready %d' % req_portion)
log_info('Retrieved finished request %d / %d' % (iter_no, req_portion))
if req.error:
log_info('Error found on request: IT %d PORTION %d, WORKER %s:%d' %
(iter_no, req_portion, sc.host, sc.port))
result = pickle.loads(req.value)
# add the worker to the pool of free services (both loading and data processing requests)
self.pending_requests.remove((sc, req_portion, req))
self.free_services.append(sc)
if req_portion is not None:
log_debug('Done with %d' % req_portion)
return result
def _init_server(self):
"""Initializes a server that registers new workers."""
registrar_class, ranker_dump_path = get_worker_registrar_for(self)
n_tries = 0
self.server = None
last_error = None
while self.server is None and n_tries < 10:
try:
n_tries += 1
self.server = ThreadPoolServer(service=registrar_class, nbThreads=1, port=self.port)
except socket.error as e:
log_warn('Port %d in use, trying to use a higher port...' % self.port)
self.port += 1
last_error = e
if self.server is None:
if last_error is not None:
raise last_error
raise Exception('Could not initialize server')
self.services = set()
self.free_services = deque()
self.pending_requests = set()
self.jobs = []
self.server_thread = Thread(target=self.server.start)
self.server_thread.setDaemon(True)
self.server_thread.start()
self.ranker_dump_path = ranker_dump_path
def _get_portion_bounds(self, portion_no):
"""(Head) return the offset and size of the specified portion of the training
data to be sent to a worker.
@param portion_no: the number of the portion whose bounds should be computed
@rtype: tuple
@return: offset and size of the desired training data portion
"""
portion_size, bigger_portions = divmod(len(self.loc_ranker.train_trees), self.data_portions)
if portion_no < bigger_portions:
return (portion_size + 1) * portion_no, portion_size + 1
else:
return portion_size * portion_no + bigger_portions, portion_size
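# Worked example (a sketch): with 10 training trees and 3 portions,
# divmod(10, 3) == (3, 1), so the (offset, size) bounds are
# (0, 4), (4, 3) and (7, 3) -- the first portion absorbs the remainder.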
def save_to_file(self, model_fname):
"""Saving just the "plain" perceptron ranker model to a file; discarding all the
parallel stuff that can't be stored in a pickle anyway."""
self.loc_ranker.save_to_file(model_fname)
class RankerTrainingService(Service):
def __init__(self, conn_ref):
super(RankerTrainingService, self).__init__(conn_ref)
self.ranker_inst = None
def exposed_init_training(self, head_ranker_path):
"""(Worker) Just deep-copy all necessary attributes from the head instance."""
tstart = time.time()
log_info('Initializing training...')
self.ranker_inst = load_ranker(head_ranker_path)
log_info('Training initialized. Time taken: %f secs.' % (time.time() - tstart))
def exposed_training_pass(self, w, pass_no, rnd_seed, data_offset, data_len):
"""(Worker) Run one pass over a part of the training data.
@param w: initial perceptron weights (pickled)
@param pass_no: pass number (for logging purposes)
@param rnd_seed: random generator seed for shuffling training examples
@param data_offset: training data portion start
@param data_len: training data portion size
@return: updated perceptron weights after passing the selected data portion (pickled)
"""
log_info('Training pass %d with data portion %d + %d' %
(pass_no, data_offset, data_len))
# use the local ranker instance
ranker = self.ranker_inst
# import current feature weights
tstart = time.time()
ranker.set_weights(pickle.loads(w))
log_info('Weights loading: %f secs.' % (time.time() - tstart))
# save rest of the training data to temporary variables, set just the
# required portion for computation
all_train_das = ranker.train_das
ranker.train_das = ranker.train_das[data_offset:data_offset + data_len]
all_train_trees = ranker.train_trees
ranker.train_trees = ranker.train_trees[data_offset:data_offset + data_len]
all_train_feats = ranker.train_feats
ranker.train_feats = ranker.train_feats[data_offset:data_offset + data_len]
all_train_sents = ranker.train_sents
ranker.train_sents = ranker.train_sents[data_offset:data_offset + data_len]
all_train_order = ranker.train_order
ranker.train_order = list(range(len(ranker.train_trees)))
if ranker.randomize:
rnd.seed(rnd_seed)
rnd.shuffle(ranker.train_order)
# do the actual computation (update w)
ranker._training_pass(pass_no)
# return the rest of the training data to member variables
ranker.train_das = all_train_das
ranker.train_trees = all_train_trees
ranker.train_feats = all_train_feats
ranker.train_sents = all_train_sents
ranker.train_order = all_train_order
# return the result of the computation
log_info('Training pass %d / %d / %d done.' % (pass_no, data_offset, data_len))
tstart = time.time()
dump = pickle.dumps((ranker.get_weights(), ranker.get_diagnostics()), pickle.HIGHEST_PROTOCOL)
log_info('Weights saving: %f secs.' % (time.time() - tstart))
return dump
def run_worker(head_host, head_port, debug_out=None):
# setup debugging output, if applicable
if debug_out is not None:
set_debug_stream(file_stream(debug_out, mode='w'))
# start the server (in the background)
log_info('Creating worker server...')
server = ThreadPoolServer(service=RankerTrainingService, nbThreads=1)
server_thread = Thread(target=server.start)
server_thread.start()
my_host = socket.getfqdn()
log_info('Worker server created at %s:%d. Connecting to head at %s:%d...' %
(my_host, server.port, head_host, head_port))
# notify main about this server
conn = connect(head_host, head_port, config={'allow_pickle': True})
conn.root.register_worker(my_host, server.port)
conn.close()
log_info('Worker is registered with the head.')
# now serve until we're killed (the server thread will continue to run)
server_thread.join()
if __name__ == '__main__':
try:
host = sys.argv[1]
port = int(sys.argv[2])
except:
sys.exit('Usage: ' + sys.argv[0] + ' <head-address> <head-port>')
run_worker(host, port)
|
ChorusNGSfilter.py | import argparse
import sys
from Choruslib import jellyfish
import os
from multiprocessing import Pool, Process
from pyfasta import Fasta
import pyBigWig
import math
def main():
args = check_options(get_options())
# jfgeneratorscount(jfpath, mer, output, generators,threads=1, size='100M'):
# make generators
print(args.input)
jellyfish.makegenerator(filenames=args.input.split(','), type=args.gzip, generators='generators')
jfkmerfile = args.output+'.jf'
bwfile = args.output+'.bw'
outfilename = args.output
jellyfish.jfgeneratorscount(jfpath=args.jellyfish, mer=args.kmer, output=jfkmerfile,
generators='generators',threads=args.threads, size='100M')
spsize = 10000000
fastain = Fasta(args.genome)
bw = pyBigWig.open(bwfile, "w")
seqlenth = dict()
seqname = dict()
genomesize = 0
for chrom in sorted(fastain.keys()):
infor = chrom.split()
seqlenth[infor[0]] = len(fastain[chrom])
seqname[infor[0]] = chrom
genomesize += seqlenth[infor[0]]
print("Genome Size: %s" % genomesize)
bw.addHeader(list(seqlenth.items()))
jfscoerlist = list()
for seqfullname in sorted(fastain.keys()):
infor = seqfullname.split()
chrlen = len(fastain[seqfullname])
if chrlen < spsize:
start = 0
end = chrlen - 1
jfscoer = jellyfish.JFNGSScoer(jfpath=args.jellyfish, jfkmerfile=jfkmerfile, mer=args.kmer,
start=start, end=end, seqfullname=seqfullname, pyfasta=fastain)
jfscoerlist.append(jfscoer)
else:
chrblock = int(chrlen / spsize) + 1
for i in range(chrblock):
start = i * spsize
end = start + spsize - 1
if i > 0:
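# overlap consecutive blocks by k-1 bases so k-mers spanning a block boundary are still counted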
start = start - args.kmer + 1
if end >= chrlen:
end = chrlen - 1
jfscoer = jellyfish.JFNGSScoer(jfpath=args.jellyfish, jfkmerfile=jfkmerfile, mer=args.kmer,
start=start, end=end, seqfullname=seqfullname, pyfasta=fastain)
jfscoerlist.append(jfscoer)
tmppath = os.path.dirname(args.output)
jfsllength = int(len(jfscoerlist) / args.threads + 1)
for jt in range(jfsllength):
if jt == jfsllength - 1:  # last chunk takes whatever is left
nowlist = jfscoerlist[jt * args.threads:]
else:
nowlist = jfscoerlist[(jt * args.threads):((jt + 1) * args.threads)]
processes = list()
for jfscoer in nowlist:
p = Process(target=jellyfish.jfngsscoerlargegenome, args=(jfscoer,tmppath))
processes.append(p)
for p in processes:
p.start()
for p in processes:
p.join()
# jfngsscoerlargegenome
for jfscoer in nowlist:
tmpfile = jfscoer.seqname + '_' + str(jfscoer.start) + "_" + str(jfscoer.end)
tmpfilename = os.path.join(tmppath, tmpfile)
score = list()
with open(tmpfilename) as inio:
for i in inio:
score = i.rstrip().split()
bw.addEntries(jfscoer.seqname, jfscoer.start, values=list(map(float, score)), span=1, step=1)
print(jfscoer.seqname, jfscoer.start, 'OK')
inio.close()
os.remove(tmpfilename)
bw.close()
bwforcount = pyBigWig.open(bwfile)
outio = open(outfilename, 'w')
with open(args.probe) as inio:
for i in inio:
#(chrom, start, end, seq) = i.rstrip().split()
probeloc = i.rstrip().split()
chrom = probeloc[0]
start = probeloc[1]
end = probeloc[2]
seq = probeloc[3]
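# sum the per-base k-mer scores at every position where a k-mer fits fully inside the probe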
score = sum(bwforcount.values(chrom, int(start) - 1, int(end) - args.kmer))
if math.isnan(score):
score = 0
print(chrom, start, end, seq, int(score), '+', sep='\t', file=outio)
outio.close()
print("finished!")
def check_options(parser):
args = parser.parse_args()
if args.jellyfish:
if not os.path.exists(args.jellyfish):
print("Can not locate jellyfish, please input full path of jellyfish\n")
parser.print_help()
sys.exit(1)
jellyfishversion = jellyfish.jfversion(args.jellyfish)
if jellyfishversion == 'None':
print("Can not locate jellyfish, please input full path of jellyfish\n")
parser.print_help()
sys.exit(1)
else:
jellyfishpath = which('jellyfish')
if jellyfishpath:
jellyfishversion = jellyfish.jfversion(jellyfishpath[0])
if jellyfishversion == 'None':
print("Can not locate jellyfish, please input full path of jellyfish\n")
parser.print_help()
sys.exit(1)
else:
args.jellyfish = jellyfishpath[0]
else:
print("Can not locate jellyfish, please input full path of jellyfish\n")
parser.print_help()
sys.exit(1)
# End check jellyfish
if not os.path.exists(args.genome):
print("Can not locate genome file, please input genome file.\n")
parser.print_help()
sys.exit(1)
if not os.path.exists(args.probe):
print("Can not locate probe file, please input genome file.\n")
parser.print_help()
sys.exit(1)
if args.input:
inputfiles = args.input.split(',')
for inputfile in inputfiles:
if not os.path.exists(inputfile):
print("Can not locate %s file.\n" % inputfile)
parser.print_help()
sys.exit(1)
else:
print("Can not locate input file, please input input file.\n")
parser.print_help()
sys.exit(1)
return args
def which(filename):
"""docstring for which"""
locations = os.environ.get("PATH").split(os.pathsep)
candidates = []
for location in locations:
candidate = os.path.join(location, filename)
if os.path.isfile(candidate):
candidates.append(candidate)
return candidates
def get_options():
parser = argparse.ArgumentParser(description="ChorusNGSfilter for counting Oligo FISH probe k-mer score using NGS data", prog='ChorusNGSfilter',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="Example:\n"
" ChorusNGSfilter -i 1.fq.gz,2.fq.gz -z gz -t 4 -g TAIR10_chr_all.fas \\ \n"
" -j /opt/software/jellyfish/bin/jellyfish -p probe.bed -o output.bed"
)
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
parser.add_argument('-j', '--jellyfish', dest='jellyfish', help='The path where Jellyfish software installed')
parser.add_argument('-g', '--genome', dest='genome', help='Fasta format genome file, should include all sequences from genome', required=True)
parser.add_argument('-i', '--input', dest='input',
help='Fastq format input files contain reads from whole genome shotgun sequencing, files can be gzipped.'
' Multiple files separate with \",\". For example: 1.fq.gz,2.fq.gz ', required=True, type=str)
parser.add_argument('-z', '--gzipped', dest='gzip', help='Input fastq file is gzipped (gz) or uncompressed (text). (Default: gz)', choices=('gz', 'text'), default='gz')
# parser.add_argument('-s', '--save', dest='saved', help='result saved folder', default='probes')
parser.add_argument('-t', '--threads', dest='threads', help='Number of threads or CPUs to use. (Default: 1)',
default=1, type=int)
parser.add_argument('-k', '--kmer', dest='kmer', help='Length of k-mer used for counting k-mers in input fastq files. (Default: 17)', default=17, type=int)
parser.add_argument('-p', '--probe', dest='probe', help='The bed format probe file generated by Chorus')
parser.add_argument('-o', '--output', dest='output', help='Output bed format probe file with k-mer score. (Default: output.bed)', default='output.bed')
# args = parser.parse_args()
return parser
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
sys.stderr.write("User interrupt\n")
sys.exit(0)
|
test_queue.py | # AMZ-Driverless
# Copyright (c) 2019 Authors:
# - Huub Hendrikx <hhendrik@ethz.ch>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import unittest
import multiprocessing
import time
from sqlalchemy.orm import Query
import rbb_server_test.database
from rbb_server.controllers.queue_controller import dequeue_task_inner, put_task_inner
from rbb_server.model.database import Database, Task, User
from rbb_swagger_server.models.task_detailed import TaskDetailed
class TestQueue(unittest.TestCase):
@classmethod
def setUpClass(cls):
rbb_server_test.database.setup_database_for_test()
@classmethod
def tearDownClass(cls):
if Database.get_session():
Database.get_session().remove()
def test_deque_under_load(self):
fill_start_time = time.time()
number_of_processes = max(2, multiprocessing.cpu_count())
number_of_queued_items = 2000 * number_of_processes
Database.get_session().execute('''TRUNCATE TABLE task_queue CASCADE''')
Database.get_session().commit()
# Fill queue
for i in range(number_of_queued_items):
task = Task()
task.priority = 0
task.description = "T%d" % i
task.assigned_to = ""
task.created = datetime.datetime.now()
task.last_updated = datetime.datetime.now()
task.state = 0
task.task = "none"
task.success = False
task.result = {}
task.runtime = 0
task.worker_labels = ""
task.configuration = {}
Database.get_session().add(task)
Database.get_session().commit()
admin_user = Database.get_session().query(User).filter(User.alias == 'admin').first()
rbb_server_test.database.close_database()
fill_end_time = time.time()
dequeue_start_time = time.time()
# Start dequeuing processes
processes = []
def run(q, worker_name):
rbb_server_test.database.init_database_connection_for_test()
tasks = []
number_of_collisions = 0
while True:
task = dequeue_task_inner(worker_name, "", "", admin_user)
if isinstance(task, TaskDetailed):
tasks.append(task.description)
task.state = 100
put_task_inner(task.identifier, task, admin_user)
else:
# Check if the queue is empty, if not then there was a collision
count = Database.get_engine().execute('''select count(uid) from task_queue where assigned_to='' ''').scalar()
if count > 0:
number_of_collisions += 1
else:
rbb_server_test.database.close_database()
q.put((tasks, number_of_collisions))
return
for i in range(number_of_processes):
queue = multiprocessing.Queue()
process = multiprocessing.Process(target=run, args=(queue, "w%d" % i) )
process.start()
processes.append((process, queue))
dequeued_tasks = []
tasks_per_process = []
total_number_of_collisions = 0
for i in range(number_of_processes):
process, queue = processes[i]
process.join()
tasks_dequeued, number_of_collisions = queue.get()
tasks_per_process.append(len(tasks_dequeued))
dequeued_tasks.extend(tasks_dequeued)
total_number_of_collisions += number_of_collisions
dequeue_end_time = time.time()
print("Filling the database took %f seconds" % (fill_end_time - fill_start_time))
print("Dequeueing took %f seconds (%f per task)" %
(dequeue_end_time - dequeue_start_time, (dequeue_end_time - dequeue_start_time) / number_of_queued_items))
print("Number of tasks dequeued per process: ", tasks_per_process)
print("Number of collisions: ", total_number_of_collisions)
self.assertEqual(len(dequeued_tasks), number_of_queued_items)
|
test_cost_time.py | from funboost.concurrent_pool.custom_threadpool_executor import CustomThreadpoolExecutor
import time
import threading
from concurrent.futures import ThreadPoolExecutor
pool = CustomThreadpoolExecutor(10)
pool2 = ThreadPoolExecutor(10)
t1 = time.time()
lock = threading.Lock()
def f(x):
with lock:
print(x)
for i in range(10000):
# pool.submit(f,i)
pool2.submit(f,i)
# threading.Thread(target=f,args=(i,)).start()
# f(i)
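# note: this prints the time to *submit* all 10000 tasks; worker threads may still be draining the queue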
print("&&&",time.time() -t1) |
gopro.py | # gopro.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:47 PM
"""Implements top level interface to GoPro module."""
from __future__ import annotations
import time
import enum
import queue
import logging
import threading
from queue import Queue
from pathlib import Path
from typing import Any, Dict, Final, Optional, Type, Callable, Union, Generic, Pattern
import wrapt
import requests
from open_gopro.api.v1_0.api import ApiV1_0
from open_gopro.exceptions import InvalidOpenGoProVersion, ResponseTimeout, InvalidConfiguration
from open_gopro.ble import BLEController, UUID, BleDevice
from open_gopro.ble.adapters import BleakWrapperController
from open_gopro.wifi import WifiController
from open_gopro.wifi.adapters import Wireless
from open_gopro.util import SnapshotQueue
from open_gopro.responses import GoProResp
from open_gopro.constants import CmdId, ErrorCode, StatusId, QueryCmdId, ProducerType
from open_gopro.api import (
Api,
api_versions,
BleCommands,
BleSettings,
BleStatuses,
WifiCommands,
WifiSettings,
Params,
)
from open_gopro.communication_client import GoProBle, GoProWifi
logger = logging.getLogger(__name__)
KEEP_ALIVE_INTERVAL: Final = 60
WRITE_TIMEOUT: Final = 10
HTTP_GET_RETRIES: Final = 5
@wrapt.decorator
def _ensure_initialized_acquire_ready_semaphore(
wrapped: Callable, instance: GoPro, args: Any, kwargs: Any
) -> Callable:
"""If the instance is initialized, acquire the semaphore before doing anything.
Raises:
GoProNotInitialized: The function can't be used yet because the GoPro device isn't initialized
Returns:
Callable: Function to call after semaphore has been acquired
"""
if instance._maintain_ble:
logger.debug(f"{wrapped.__name__} acquiring semaphore")
with instance._ready:
logger.debug(f"{wrapped.__name__} has the semaphore")
ret = wrapped(*args, **kwargs)
else:
ret = wrapped(*args, **kwargs)
if instance._maintain_ble:
logger.debug(f"{wrapped.__name__} released the semaphore")
return ret
class GoPro(GoProBle, GoProWifi, Generic[BleDevice]):
"""The top-level BLE and Wifi interface to a GoPro device.
See `Open GoPro <https://gopro.github.io/OpenGoPro/python_sdk>`_ for complete documentation.
This will handle for BLE:
- discovering device
- establishing connections
- discovering GATT characteristics
- enabling notifications
- discovering Open GoPro version
- transferring data
This will handle for Wifi:
- finding SSID and password
- establishing Wifi connection
- transferring data
It will also do some synchronization, etc:
- ensuring camera is ready / not encoding before transferring data
- sending keep alive signal periodically
If no target arg is passed in, the first discovered GoPro device will be connected to.
It can be used via context manager:
>>> with GoPro() as gopro:
>>> gopro.ble_command.set_shutter(gopro.params.Shutter.ON)
Or without:
>>> gopro = GoPro()
>>> gopro.open()
>>> gopro.ble_command.set_shutter(gopro.params.Shutter.ON)
>>> gopro.close()
Args:
identifier (Pattern, optional): Last 4 of camera name / serial number (i.e. 0456 for GoPro0456). Defaults
to None (i.e. connect to first discovered GoPro)
ble_adapter (BLEController, optional): Class used to control computer's BLE connection / send data.
Defaults to BleakController().
wifi_adapter (WifiController, optional): Class used to control computer's Wifi connection / send data.
Defaults to Wireless().
wifi_interface (str, optional): Set to specify the wifi interface the local machine will use to connect
to the GoPro. If None (or not set), first discovered interface will be used.
enable_wifi (bool, optional): Optionally do not enable Wifi if set to False. Defaults to True.
maintain_ble (bool, optional): Optionally do not perform BLE housekeeping if set to False (used for
testing). Defaults to True.
"""
_base_url = "http://10.5.5.9:8080/" #: Hard-coded Open GoPro base URL
class _InternalState(enum.IntFlag):
"""State used to manage whether the GoPro instance is ready or not."""
READY = 0
ENCODING = 1 << 0
SYSTEM_BUSY = 1 << 1
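# READY is the absence of flags: the camera is usable only when neither
# ENCODING nor SYSTEM_BUSY is set (combined flag value == 0)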
def __init__(
self,
target: Optional[Union[Pattern, BleDevice]] = None,
ble_adapter: Type[BLEController] = BleakWrapperController,
wifi_adapter: Type[WifiController] = Wireless,
wifi_interface: Optional[str] = None,
enable_wifi: bool = True,
maintain_ble: bool = True,
) -> None:
# Store initialization information
self._enable_wifi_during_init = enable_wifi
self._maintain_ble = maintain_ble
# Initialize GoPro Communication Client
GoProBle.__init__(self, ble_adapter(), self._disconnect_handler, self._notification_handler, target)
GoProWifi.__init__(self, wifi_adapter(wifi_interface))
# We start with version 1.0. It will be updated once we query the version
self._api: Api = ApiV1_0(self, self)
# Current accumulating synchronous responses, indexed by UUID. This assumes there can only be one active response per UUID
self._active_resp: Dict[UUID, GoProResp] = {}
# Responses that we are waiting for.
self._sync_resp_wait_q: SnapshotQueue = SnapshotQueue()
# Synchronous response that has been parsed and are ready for their sender to receive as the response.
self._sync_resp_ready_q: SnapshotQueue = SnapshotQueue()
# For outputting asynchronously received information
self._out_q: "Queue[GoProResp]" = Queue()
self._listeners: Dict[ProducerType, bool] = {}
# Set up events
self._ble_disconnect_event = threading.Event()
self._ble_disconnect_event.set()
# Set up threads
self._threads_waiting = 0
# If we are to perform BLE housekeeping
if self._maintain_ble:
self._threads_waiting += 2
# Set up thread to send keep alive
self._keep_alive_thread = threading.Thread(
target=self._periodic_keep_alive, name="keep_alive", daemon=True
)
# Set up thread to block until camera is ready to receive commands
self._ready = threading.BoundedSemaphore(value=1)
self._state_condition = threading.Condition()
self._internal_state = GoPro._InternalState.ENCODING | GoPro._InternalState.SYSTEM_BUSY
self._state_thread = threading.Thread(target=self._maintain_state, name="state", daemon=True)
def __enter__(self) -> "GoPro": # pylint: disable=missing-return-doc
self.open()
return self
def __exit__(self, *_: Any) -> None:
self.close()
def __del__(self) -> None:
self.close()
@property
def identifier(self) -> Optional[str]:
"""Get a unique identifier for this instance.
The identifier is the last 4 digits of the camera. That is, the same string that is used to
scan for the camera for BLE.
If no target has been provided and a camera is not yet found, this will be None
Returns:
Optional[str]: last 4 digits if available, else None
"""
return self._ble.identifier
@property
def is_ble_connected(self) -> bool:
"""Are we connected via BLE to the GoPro device?
Returns:
bool: True if yes, False if no
"""
return self._ble.is_connected
@property
def is_wifi_connected(self) -> bool:
"""Are we connected via Wifi to the GoPro device?
Returns:
bool: True if yes, False if no
"""
return self._wifi.is_connected
@property
def is_encoding(self) -> bool:
"""Is the camera currently encoding?
Returns:
bool: True if yes, False if no
"""
if not self._maintain_ble:
raise InvalidConfiguration("Not maintaining BLE state so encoding is not applicable")
return bool(self._internal_state & GoPro._InternalState.ENCODING)
@property
def is_busy(self) -> bool:
"""Is the camera currently performing a task that prevents it from accepting commands?
Returns:
bool: True if yes, False if no
"""
if not self._maintain_ble:
raise InvalidConfiguration("Not maintaining BLE state so busy is not applicable")
# SYSTEM_BUSY is 1 << 1, so the masked value is 0 or 2 and must be truth-tested, not compared to 1
return bool(self._internal_state & GoPro._InternalState.SYSTEM_BUSY)
@property
def version(self) -> float:
"""The API version does the connected camera supports
Note! If we have not yet connected and query the peer to find its version, this will be set to 1.0
Returns:
float: supported version in decimal form
"""
return float(self._api.version)
@property
def ble_command(self) -> BleCommands:
"""Used to call the version-specific BLE commands
Returns:
BleCommands: the commands
"""
return self._api.ble_command
@property
def ble_setting(self) -> BleSettings:
"""Used to access the version-specific BLE settings
Returns:
BleSettings: the settings
"""
return self._api.ble_setting
@property
def ble_status(self) -> BleStatuses:
"""Used to access the version-specific BLE statuses
Returns:
BleStatuses: the statuses
"""
return self._api.ble_status
@property
def wifi_command(self) -> WifiCommands:
"""Used to access the version-specific Wifi commands
Returns:
WifiCommands: the commands
"""
return self._api.wifi_command
@property
def wifi_setting(self) -> WifiSettings:
"""Used to access the version-specific Wifi settings
Returns:
WifiSettings: the settings
"""
return self._api.wifi_setting
@property
def params(self) -> Type[Params]:
"""Version-specific parameters for BLE / Wifi commands, statuses, and settings
Returns:
Type[Params]: the parameters
"""
return self._api.params
def open(self, timeout: int = 10, retries: int = 5) -> None:
"""Perform all initialization commands for ble and wifi
For BLE: scan and find device, establish connection, discover characteristics, configure queries
start maintenance, and get the Open GoPro version.
For Wifi: discover SSID and password, enable and connect. Or disable if not using.
Args:
timeout (int, optional): How long to wait for each connection before timing out. Defaults to 10.
retries (int, optional): How many connection attempts before considering connection failed. Defaults to 5.
"""
# Establish BLE connection and start maintenance threads if desired
self._open_ble(timeout, retries)
# Find and configure API version
version = self.ble_command.get_open_gopro_api_version().flatten
version_str = f"{version.major}.{version.minor}"
try:
self._api = api_versions[version_str](self, self)
except KeyError as e:
raise InvalidOpenGoProVersion(version_str) from e
logger.info(f"Using Open GoPro API version {version_str}")
# Establish Wifi connection if desired
if self._enable_wifi_during_init:
self._open_wifi(timeout, retries)
else:
# Otherwise, turn off Wifi
logger.info("Turning off the camera's Wifi radio")
self.ble_command.enable_wifi_ap(False)
def close(self) -> None:
"""Safely stop the GoPro instance.
This will disconnect BLE and WiFI if applicable.
If not using the context manager, it is mandatory to call this before exiting the program in order to
prevent reconnection issues because the OS has never disconnected from the previous session.
"""
self._close_wifi()
self._close_ble()
def get_update(self, timeout: Optional[float] = None) -> GoProResp:
"""Get a notification that we received from a registered listener.
If timeout is None, this will block until a notification is received.
The updates are received via FIFO
Args:
timeout (float, optional): Time to wait for a notification before returning. Defaults to None (wait forever)
Returns:
GoProResp: Received notification
"""
return self._out_q.get(timeout=timeout)
def keep_alive(self) -> bool:
"""Send a heartbeat to prevent the BLE connection from dropping.
This is sent automatically by the GoPro instance if its `maintain_ble` argument is not False.
Returns:
bool: True if it succeeded, False otherwise
"""
return self.ble_setting.led.set(self.params.LED.BLE_KEEP_ALIVE).is_ok
##########################################################################################################
# End Public API
##########################################################################################################
@property
def _is_ble_initialized(self) -> bool:
"""Are we done
Returns:
bool: True if yes, False if no
"""
return self._threads_waiting == 0
def _maintain_state(self) -> None:
"""Thread to keep track of ready / encoding and acquire / release ready semaphore."""
self._ready.acquire()
while self.is_ble_connected:
internal_status_previous = self._internal_state
with self._state_condition:
self._state_condition.wait()
# If we were ready but not now we're not, acquire the semaphore
if internal_status_previous == 0 and self._internal_state != 0:
logger.debug("Control acquiring semaphore")
self._ready.acquire()
logger.debug("Control has semaphore")
# If we weren't ready but now we are, release the semaphore
elif internal_status_previous != 0 and self._internal_state == 0:
# If this is the first time, mark that we might now be initialized
if not self._is_ble_initialized:
self._threads_waiting -= 1
self._ready.release()
logger.debug("Control released semaphore")
self._threads_waiting += 1
logger.debug("Maintain state thread exiting...")
def _periodic_keep_alive(self) -> None:
"""Thread to periodically send the keep alive message via BLE."""
while self.is_ble_connected:
if not self._is_ble_initialized:
self._threads_waiting -= 1
try:
if self.keep_alive():
time.sleep(KEEP_ALIVE_INTERVAL)
except Exception: # pylint: disable=broad-except
# If the connection disconnects while we were trying to send, there can be any number
# of exceptions. This is expected and this thread will exit on the next while check.
pass
self._threads_waiting += 1
logger.debug("periodic keep alive thread exiting...")
def _register_listener(self, producer: ProducerType) -> None:
"""Register a producer to store notifications from.
The notifications can be accessed via the get_update() method.
Args:
producer (ProducerType): Producer to listen to.
"""
self._listeners[producer] = True
def _unregister_listener(self, producer: ProducerType) -> None:
"""Unregister a producer in order to stop listening to its notifications.
Args:
producer (ProducerType): Producer to stop listening to.
"""
if producer in self._listeners:
del self._listeners[producer]
def _open_ble(self, timeout: int = 10, retries: int = 5) -> None:
"""Connect the instance to a device via BLE.
Args:
timeout (int, optional): Time in seconds before considering establishment failed. Defaults to 10 seconds.
retries (int, optional): How many tries to reconnect after failures. Defaults to 5.
Raises:
ConnectFailed: Connection could not be established
"""
# Establish connection, pair, etc.
self._ble.open(timeout, retries)
# Configure threads if desired
if self._maintain_ble:
self._state_thread.start()
self.ble_status.encoding_active.register_value_update()
self.ble_status.system_ready.register_value_update()
self._keep_alive_thread.start()
logger.info("BLE is ready!")
# TODO refactor this into smaller methods
def _notification_handler(self, handle: int, data: bytearray) -> None:
"""Receive notifications from the BLE controller.
Args:
handle (int): Attribute handle that notification was received on.
data (bytes): Bytestream that was received.
"""
# Convert handle to UUID
uuid = self._ble.gatt_table.handle2uuid(handle)
# Responses we don't care about. For now, just the BLE-spec defined battery characteristic
if uuid is UUID.BATT_LEVEL:
return
logger.debug(f'Received response on {uuid}: {data.hex(":")}')
# Add to response dict if not already there
if uuid not in self._active_resp:
self._active_resp[uuid] = GoProResp(self._parser_map, info=[uuid])
self._active_resp[uuid]._accumulate(data)
if self._active_resp[uuid].is_received:
response = self._active_resp[uuid]
response._parse()
# Handle internal statuses
if self._maintain_ble:
if (
response.cmd
in [
QueryCmdId.REG_STATUS_VAL_UPDATE,
QueryCmdId.GET_STATUS_VAL,
QueryCmdId.STATUS_VAL_PUSH,
]
and StatusId.ENCODING in response.data
):
with self._state_condition:
if response[StatusId.ENCODING] is True:
self._internal_state |= GoPro._InternalState.ENCODING
else:
self._internal_state &= ~GoPro._InternalState.ENCODING
self._state_condition.notify()
if (
response.cmd
in [
QueryCmdId.REG_STATUS_VAL_UPDATE,
QueryCmdId.GET_STATUS_VAL,
QueryCmdId.STATUS_VAL_PUSH,
]
and StatusId.SYSTEM_READY in response.data
):
with self._state_condition:
if response[StatusId.SYSTEM_READY] is True:
self._internal_state &= ~GoPro._InternalState.SYSTEM_BUSY
else:
self._internal_state |= GoPro._InternalState.SYSTEM_BUSY
self._state_condition.notify()
# Check if this is the awaited synchronous response (id matches). Note! these have to come in order.
response_claimed = False
if not self._sync_resp_wait_q.empty():
queue_snapshot = self._sync_resp_wait_q.snapshot()
if queue_snapshot[0].id is response.id:
# Dequeue it and put this on the ready queue
self._sync_resp_wait_q.get_nowait()
self._sync_resp_ready_q.put(response)
response_claimed = True
# If this wasn't the awaited synchronous response...
if not response_claimed:
logger.info(f"--(ASYNC)--> {response}")
# See if there are any registered responses that need to be enqueued for client consumption
for key in list(response.data.keys()):
if (response.cmd, key) not in self._listeners:
del response.data[key]
# Enqueue the response if there is anything left
if len(response.data) > 0:
self._out_q.put(response)
# Clear active response from response dict
del self._active_resp[uuid]
def _close_ble(self) -> None:
if self.is_ble_connected and self._ble is not None:
logger.info("Terminating the BLE connection")
self._ble_disconnect_event.clear()
self._ble.close()
self._ble_disconnect_event.wait()
def _disconnect_handler(self, _: Any) -> None:
"""Handle disconnects"""
if self._ble_disconnect_event.is_set():
logger.error("Ble connection terminated unexpectedly.")
# TODO how to handle unexpected disconnects?
# raise ConnectionTerminated("Ble connection terminated.")
self._ble_disconnect_event.set()
def _write_characteristic_receive_notification(self, uuid: UUID, data: bytearray) -> GoProResp:
"""Perform a BLE write and wait for a corresponding notification response.
There should hopefully not be a scenario where this needs to be called directly as it is generally
called from the instance's API delegate (i.e. self)
Args:
uuid (UUID): UUID to write to
data (bytearray): data to write
Raises:
Exception: Unexpected functionality occurred
Returns:
GoProResp: parsed notification response data
"""
assert self._ble is not None
# Acquire ready semaphore unless we are initializing or this is a Set Shutter Off command
have_semaphore = False
if (
self._maintain_ble
and self._is_ble_initialized
and not (
GoProResp._from_write_command(self._parser_map, uuid, data).id is CmdId.SET_SHUTTER
and data[-1] == 0
)
):
logger.debug(
f"{GoProResp._from_write_command(self._parser_map, uuid, data).id} acquiring semaphore"
)
self._ready.acquire()
logger.debug(f"{GoProResp._from_write_command(self._parser_map, uuid, data).id} has semaphore")
have_semaphore = True
# Store information on the response we are expecting
self._sync_resp_wait_q.put(GoProResp._from_write_command(self._parser_map, uuid, data))
# Perform write
self._ble.write(uuid.value, data)
# Wait to be notified that response was received
try:
response = self._sync_resp_ready_q.get(timeout=WRITE_TIMEOUT)
except queue.Empty as e:
logger.error(f"Response timeout of {WRITE_TIMEOUT} seconds!")
raise ResponseTimeout(WRITE_TIMEOUT) from e
# Check status
try:
if response.status is not ErrorCode.SUCCESS:
logger.warning(f"Received non-success status: {response.status}")
except AttributeError:
logger.error("Not able to parse status from response")
if self._maintain_ble:
# If this was set shutter on, we need to wait to be notified that encoding has started
if response.cmd is CmdId.SET_SHUTTER and data[-1] == 1:
while not self.is_encoding:
# We don't want to use the application's loop, can't use any of our loops due to potential deadlock,
# and don't want to spawn a new thread for this. So just poll ¯\_(ツ)_/¯
# A read to an int is atomic anyway.
time.sleep(0.1)
# Release the semaphore if we acquired it
if have_semaphore:
self._ready.release()
logger.debug(
f"{GoProResp._from_write_command(self._parser_map, uuid, data).id} released the semaphore"
)
return response
def _read_characteristic(self, uuid: UUID) -> GoProResp:
"""Read a characteristic's data by UUID.
There should hopefully not be a scenario where this needs to be called directly as it is generally
called from the instance's delegates (i.e. self.command, self.setting, self.ble_status)
Args:
uuid (UUID): characteristic data to read
Returns:
bytearray: read data
"""
assert self._ble is not None
have_semaphore = False
if self._maintain_ble:
logger.debug(f"{uuid} acquiring semaphore")
self._ready.acquire()
logger.debug(f"{uuid} has the semaphore")
have_semaphore = True
received_data = self._ble.read(uuid.value)
if self._maintain_ble and have_semaphore:
self._ready.release()
logger.debug(f"{uuid} released the semaphore")
return GoProResp._from_read_response(self._parser_map, uuid, received_data)
def _open_wifi(self, timeout: int = 15, retries: int = 5) -> None:
"""Connect to a GoPro device via Wifi.
Args:
timeout (int, optional): Time before considering establishment failed. Defaults to 15 seconds.
retries (int, optional): How many tries to reconnect after failures. Defaults to 5.
Raises:
Exception: Wifi failed to connect.
"""
logger.info("Discovering Wifi AP info and enabling via BLE")
password = self.ble_command.get_wifi_password().flatten
ssid = self.ble_command.get_wifi_ssid().flatten
self.ble_command.enable_wifi_ap(True)
self._wifi.open(ssid, password, timeout, retries)
def _close_wifi(self) -> None:
"""Terminate the Wifi connection."""
if hasattr(self, "_wifi"): # Corner case where instantication fails before superclass is initialized
self._wifi.close()
@_ensure_initialized_acquire_ready_semaphore
def _get(self, url: str) -> GoProResp:
"""Send an HTTP GET request to an Open GoPro endpoint.
There should hopefully not be a scenario where this needs to be called directly as it is generally
called from the instance's delegates (i.e. self.wifi_command and self.wifi_status)
Args:
url (str): endpoint URL
Returns:
GoProResp: response
"""
url = GoPro._base_url + url
logger.debug(f"Sending: {url}")
response: Optional[GoProResp] = None
for retry in range(HTTP_GET_RETRIES):
try:
request = requests.get(url)
request.raise_for_status()
response = GoProResp._from_http_response(self._parser_map, request)
break
except requests.exceptions.HTTPError as e:
# The camera responded with an HTTP error. The command did reach the camera, so
# parse the error response and stop retrying.
logger.warning(e)
response = GoProResp._from_http_response(self._parser_map, e.response)
break
# TODO figure out why these are failing. For now just retry
except requests.exceptions.ConnectionError as e:
logger.warning(repr(e))
logger.warning("Retrying to send the command...")
if retry == HTTP_GET_RETRIES - 1:
raise ResponseTimeout(HTTP_GET_RETRIES) from e
assert response is not None
return response
@_ensure_initialized_acquire_ready_semaphore
def _stream_to_file(self, url: str, file: Path) -> None:
"""Send an HTTP GET request to an Open GoPro endpoint to download a binary file.
There should hopefully not be a scenario where this needs to be called directly as it is generally
called from the instance's delegates (i.e. self.wifi_command and self.wifi_status)
Args:
url (str): endpoint URL
file (Path): location where file should be downloaded to
"""
url = GoPro._base_url + url
logger.debug(f"Sending: {url}")
with requests.get(url, stream=True) as request:
request.raise_for_status()
with open(file, "wb") as f:
logger.debug(f"receiving stream to {file}...")
for chunk in request.iter_content(chunk_size=8192):
f.write(chunk)
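# Illustrative sketch (not part of the SDK): the retry shape used by GoPro._get
# above, isolated as a standalone helper. The names here (fetch, GET_RETRIES)
# are hypothetical: retry only on transport failures, stop immediately once the
# camera answers, even with an error status.
def fetch(url: str) -> requests.Response:
    GET_RETRIES = 5
    for retry in range(GET_RETRIES):
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            return response
        except requests.exceptions.HTTPError as e:
            # The server answered; its error body is still the result.
            return e.response
        except requests.exceptions.ConnectionError:
            # Transient transport failure: retry, then give up.
            if retry == GET_RETRIES - 1:
                raise
    raise AssertionError("unreachable")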
|
game.py | from threading import Thread
import sys
from pymuse.signal import SignalData
from datetime import datetime
import numpy as np
from enum import Enum
from queue import Empty
from mindpong.model.player import Player, SIGNAL_NAMES, PlayerName
from mindpong.model.mathexercise import MathExercise
DEFAULT_PORT_PLAYER_ONE = 5001
DEFAULT_PORT_PLAYER_TWO = 5002
TIMEOUT_READ_DATA = 0.05 # seconds
class GameState(Enum):
INITIAL = 0
IN_PLAY = 1
FINISHED = 2
class Game():
""" Represents the game state """
def __init__(self, signals_callback):
self._state: GameState = GameState.INITIAL
self.winner = None
self.callbacks = signals_callback
self.players = [
Player(PlayerName.PLAYER_ONE, signals_callback, DEFAULT_PORT_PLAYER_ONE),
Player(PlayerName.PLAYER_TWO, signals_callback, DEFAULT_PORT_PLAYER_TWO)
]
self.math_exercices = MathExercise()
self._game_counter = 0
@property
def state(self):
return self._state
@state.setter
def state(self, new_state):
""" sets the state of the game """
self._state = new_state
def begin(self):
for player in self.players:
player.start("Game #%i - %s"%(self._game_counter, datetime.now().strftime("%Y-%m-%d-%H_%M_%S")))
player.is_playing = True
self._update_thread = Thread(target=self._update_signal)
self._update_thread.start()
def end(self):
for player in self.players:
player.stop()
self._game_counter += 1
def _update_signal(self):
print("Game started with " + str(len(self.players)) + " players")
data = [np.nan] * len(self.players)
while self._state is GameState.IN_PLAY:
try:
data = self._get_players_data(data)
except SystemExit:
self._state = GameState.INITIAL
break
if self._has_undefined_signal(data):
print("Warning: cannot get signal for player no "
+ self._get_index_without_signal(data))
continue
for callback in self.callbacks:
callback(data)
for player in self.players:
player.stop()
def _get_players_data(self, data):
for i, player in enumerate(self.players):
try:
while True:
# Keep popping until the queue is empty so that we
# only keep the newest value for the player
data[i] = self._get_mean_signal(player)
except Empty:
pass
return data
def _get_mean_signal(self, player):
data: SignalData = player.signals.read(SIGNAL_NAMES[0], TIMEOUT_READ_DATA)
return (data.time, np.nanmean(data.values))
def _has_undefined_signal(self, data):
return any(x is np.nan for x in data)
def _get_index_without_signal(self, data):
return " ".join(str(i) for i, x in enumerate(data) if x is np.nan)
|
server.py | import rclpy
from rclpy.node import Node
import threading
from threading import Lock
import uuid
import camera.overlay_lib as overlay_lib
# import board
from std_msgs.msg import String
from std_msgs.msg import Int32MultiArray, Int16
from sensor_msgs.msg import Joy, Imu, FluidPressure, Temperature
import argparse
import cv2
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from .camera import Camera
from .log import logger
from .debounce import ButtonHandler
URL_PATH_MJPG = "/camera.mjpg"
URL_PATH_FAVICON = "/favicon.ico"
SLEEP_IN_SEC = 0.050
x = 0
y = 0
display_config = 0
power_info = "N/A"
CPU_info = "N/A"
euler = [0.0, 0.0, 0.0]
temp = "N/A"
alt = "N/A"
diff_fps = 1
flash_message = ""
take_snapshot = False
class CameraHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.document_root = server.get_document_root()
self.camera = server.get_camera()
# https://www.tutorialkart.com/opencv/python/opencv-python-get-image-size/
self.frame_shape = self.camera.get_frame(SLEEP_IN_SEC).shape
super(CameraHandler, self).__init__(request, client_address, server)
def flash_message(self, text, frame, pos_x=200, pos_y=20, duration=3):
# self.camera.print_text(frame, pos_x, pos_y, text)
font = cv2.FONT_HERSHEY_SIMPLEX
font_size = 0.3
font_color = [255, 255, 0]
font_thickness = 1
cv2.putText(
frame,
str(text),
(int(pos_x), int(pos_y)),
font,
font_size,
font_color,
font_thickness,
cv2.LINE_AA,
)
self.clear_text(duration)
def c_text(self):
global flash_message
flash_message = ""
def clear_text(self, duration=3):
# NOTE: assumes a scheduler (e.g. an APScheduler instance) is attached as
# self.scheduler; it is never created in this file.
self.scheduler.add_job(
self.c_text,
"interval",
seconds=int(duration),
id="clear_text",
replace_existing=True,
)
def change_view(self):
global display_config
if display_config >= 3:
display_config = 0
else:
display_config += 1
def save_snapshot(self, im):
# save snapshot when button is pressed down
file_path = "snapshots/" + str(uuid.uuid1()) + ".jpg"
# write snapshot to file (we use image value instead of camera because it's already in JPEG format)
with open(file_path, "wb") as f:
f.write(im)
def do_GET(self):
if self.path == URL_PATH_MJPG:
self.send_response(200)
self.send_header(
"Content-type", "multipart/x-mixed-replace; boundary=--jpgboundary"
)
self.end_headers()
while self.camera.is_opened():
global diff_fps, flash_message, take_snapshot
start_fps = time.time()
frame = self.camera.get_frame(SLEEP_IN_SEC)
# Does not work
if display_config == 0:
overlay_lib.drawCrosshair(
frame, self.frame_shape[1], self.frame_shape[0]
)
overlay_lib.draw_joy(
frame, x, y, self.frame_shape[1], self.frame_shape[0]
)
overlay_lib.draw_power(
frame, power_info, self.frame_shape[1], self.frame_shape[0]
)
overlay_lib.draw_CPU(
frame, CPU_info, self.frame_shape[1], self.frame_shape[0]
)
overlay_lib.draw_FPS(
frame,
"FPS: " + str(int(1 / float(diff_fps))),
self.frame_shape[1],
self.frame_shape[0],
)
overlay_lib.draw_IMU(
frame,
euler,
temp,
alt,
self.frame_shape[1],
self.frame_shape[0],
)
# self.camera.draw_power2(frame, "AAA")
elif display_config == 1:
overlay_lib.drawCrosshair(
frame, self.frame_shape[1], self.frame_shape[0]
)
elif display_config == 2:
continue
ret, jpg = cv2.imencode(".jpg", frame)
if take_snapshot:
self.save_snapshot(jpg)
take_snapshot = False
# jpg = self.camera.read_in_jpeg(SLEEP_IN_SEC, 1 / diff_fps)
if jpg is None:
continue
self.wfile.write("--jpgboundary".encode())
self.send_header("Content-type", "image/jpeg")
self.send_header("Content-length", str(jpg.nbytes))
self.end_headers()
self.wfile.write(jpg)
endtime_fps = time.time()
diff_fps = endtime_fps - start_fps
elif self.path == URL_PATH_FAVICON:
self.send_response(404)
self.end_headers()
self.wfile.write("favicon is not found".encode())
else:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
with open(self.document_root + "/index.html", "r") as f:
self.wfile.write(f.read().encode())
logger.info("thread is stopping ... [{path}]".format(path=self.path))
class dbounce:
def __init__(self, pin, func, edge="both", bouncetime=200):
self.edge = edge
self.func = func
self.pin = pin
self.bouncetime = float(bouncetime) / 1000
self.lastpinval = self.pin
self.lock = threading.Lock()
def check(self, button, *args):
pinval = button
if (
pinval == 0 and self.lastpinval == 1
): # and (self.edge in ["falling", "both"]):
self.func(*args)
# print("release")
if (
pinval == 1 and self.lastpinval == 0
): # and (self.edge in ["rising", "both"]):
# self.func(*args)
a = 1
# print("pressed")
# print(pinval, self.lastpinval)
self.lastpinval = pinval
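# Hypothetical usage sketch for the debouncer above (not wired into the node):
# feed successive raw button samples; func fires once per 1 -> 0 transition.
def _dbounce_example():
    events = []
    db = dbounce(pin=1, func=events.append)
    for sample in (1, 1, 0, 0, 1, 0):  # raw button samples
        db.check(sample, "btn")        # extra args are forwarded to func
    assert events == ["btn", "btn"]    # two falling edges -> two calls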
class Robot_Info(Node):
def __init__(self):
super().__init__("robot_info")
# Keep subscription handles under distinct names so they do not shadow the callbacks
self.joy_sub = self.create_subscription(Joy, "joy", self.joy_topic, 10)
# self.move_sub = self.create_subscription(String, "in", self.joy_topic, 10)
self.CPU_sub = self.create_subscription(
String, "info_sys_CPU", self.CPU_topic, 10
)
self.power_sub = self.create_subscription(
String, "info_sys_power", self.power_topic, 10
)
self.imu_sub = self.create_subscription(Imu, "/imu", self.imu_topic, 10)
self.temp_sub = self.create_subscription(
Temperature, "/temp", self.temp_topic, 10
)
self.press_sub = self.create_subscription(
FluidPressure, "/press", self.press_topic, 10
)
self.init_buttons = True
def imu_topic(self, msg):
global euler
euler = euler_from_quaternion(
msg.orientation.x,
msg.orientation.y,
msg.orientation.z,
msg.orientation.w,
False,
1,
)
def temp_topic(self, msg):
global temp
temp = round(msg.temperature, 1)
def press_topic(self, msg):
global alt
alt = int(get_altitude(msg.fluid_pressure))
def power_topic(self, msg):
global power_info
power_info = msg.data
def CPU_topic(self, msg):
global CPU_info
CPU_info = msg.data
def take_snapshot(self, argum="N/A"):
global take_snapshot
take_snapshot = True
# print("SNAP!!!!" + str(argum))
def joy_topic(self, msg):
global x, y, display_config
x = round(msg.axes[0], 1)
y = round(msg.axes[1], 1)
if msg.buttons[9] == 1:
# self.camera.change_view()
if display_config >= 3:
display_config = 0
else:
display_config += 1
if self.init_buttons:
self.cb = dbounce(msg.buttons[5], self.take_snapshot, ["kmkmkm"])
self.init_buttons = False
if not self.init_buttons:
try:
self.cb.check(msg.buttons[5])
except Exception as e:
print("ERROR: " + str(e))
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
def set_camera(self, camera):
self.camera = camera
def get_camera(self):
return self.camera
def set_document_root(self, document_root):
self.document_root = document_root
def get_document_root(self):
return self.document_root
# Probably better to define either a message or a common library
import subprocess
def get_ip_address(interface):
cmd = (
"ifconfig %s | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1'"
% interface
)
return subprocess.check_output(cmd, shell=True).decode("ascii")[:-1]
import math
G_TO_MPSS = 9.80665
def get_altitude(pressure: float, sea_level_hPa: float = 1013.25) -> float:
"""
The conversion uses the formula:
h = (T0 / L0) * ((p / P0)**(-(R* * L0) / (g0 * M)) - 1)
where:
h = height above sea level
T0 = standard temperature at sea level = 288.15
L0 = standard temperature lapse rate = -0.0065
p = measured pressure
P0 = static pressure = 1013.25
g0 = gravitational acceleration = 9.80665
M = molecular mass of earth's air = 0.0289644
R* = universal gas constant = 8.31432
Given the constants, this works out to:
h = 44330.8 * (1 - (p / P0)**0.190263)
Arguments:
pressure {float} -- current pressure in hPa
sea_level_hPa {float} -- The current hPa at sea level.
Returns:
float -- altitude above sea level in meters
"""
return 44330.8 * (1 - pow(pressure / sea_level_hPa, 0.190263))
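# Worked example for the formula above (illustrative only, never called by the
# node): a 900 hPa reading against the standard 1013.25 hPa sea-level pressure
# works out to roughly 989 m: 44330.8 * (1 - (900 / 1013.25)**0.190263).
def _altitude_example():
    assert abs(get_altitude(900.0) - 988.6) < 1.0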
def compute_sea_level(altitude: float, atmospheric: float) -> float:
"""
Calculates the pressure at sea level (in hPa) from the specified altitude
(in meters), and atmospheric pressure (in hPa).
# Equation taken from BMP180 datasheet (page 17):
# http://www.adafruit.com/datasheets/BST-BMP180-DS000-09.pdf
Args:
altitude : Altitude in meters
atmospheric : Atmospheric pressure in hPa
Returns:
float -- the approximate pressure at sea level in hPa
"""
return atmospheric / pow(1.0 - (altitude / 44330.0), 5.255)
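# Round-trip sanity check (illustrative only, never called by the node):
# feeding get_altitude's result back through compute_sea_level recovers
# roughly the standard sea-level pressure; the two formulas use slightly
# different exponents (5.255 vs 1/0.190263), so agreement is approximate.
def _pressure_roundtrip_example():
    h = get_altitude(900.0)  # ~988.6 m
    assert abs(compute_sea_level(h, 900.0) - 1013.25) < 0.5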
def euler_from_quaternion(x, y, z, w, rad=False, approx=1):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
if not rad:
roll_x = round(math.degrees(roll_x), approx)
pitch_y = round(math.degrees(pitch_y), approx)
yaw_z = round(math.degrees(yaw_z), approx)
return roll_x, pitch_y, yaw_z  # radians if rad is True, else degrees rounded to approx decimals
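# Quick sanity checks for the conversion above (illustrative only, never
# called by the node): the identity quaternion gives zero angles, and a 90
# degree rotation about z shows up as yaw.
def _euler_example():
    assert euler_from_quaternion(0, 0, 0, 1) == (0.0, 0.0, 0.0)
    # w = cos(45deg), z = sin(45deg) -> (roll, pitch, yaw) ~= (0, 0, 90) degrees
    _, _, yaw = euler_from_quaternion(0.0, 0.0, 0.7071068, 0.7071068)
    assert abs(yaw - 90.0) < 0.2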
def main(args=None):
rclpy.init()
parser = argparse.ArgumentParser()
parser.add_argument("--bind", type=str, default=get_ip_address("wlan0"))
parser.add_argument("--port", type=int, default=8080)
parser.add_argument("--width", type=int, default=640)
parser.add_argument("--height", type=int, default=480)
parser.add_argument("--directory", type=str, default="html")
parser.add_argument("--device", type=str, default="jetson")
args = parser.parse_args()
# The parameter "--device" can be integer 0, 1, 2 etc or a string if tis is "jetson" we wil use the jetson caemra as capture device
camera = Camera(args.device, args.width, args.height)
try:
server = ThreadedHTTPServer((args.bind, args.port), CameraHandler)
server.set_camera(camera)
server.set_document_root(args.directory)
logger.info("server started")
thread2 = threading.Thread(target=server.serve_forever)
thread2.start()
r_info = Robot_Info()
# server.serve_forever()
# Setup and start the thread to read serial port r_info = Robot_Info()
thread_lock = Lock()
# thread = threading.Thread(target=rclpy.spin, args=(server))
thread = threading.Thread(target=rclpy.spin, args=(r_info,))
thread.start()
except KeyboardInterrupt:
logger.info("server is stopping ...")
camera.release()
server.shutdown()
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
r_info.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
|
test_setup.py | """Test component/platform setup."""
# pylint: disable=protected-access
import asyncio
import datetime
import os
import threading
from unittest.mock import AsyncMock, Mock, patch
import pytest
import voluptuous as vol
from homeassistant import config_entries, setup
import homeassistant.config as config_util
from homeassistant.const import EVENT_COMPONENT_LOADED, EVENT_HOMEASSISTANT_START
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.config_validation import (
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
MockModule,
MockPlatform,
assert_setup_component,
get_test_config_dir,
get_test_home_assistant,
mock_entity_platform,
mock_integration,
)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
@pytest.fixture(autouse=True)
def mock_handlers():
"""Mock config flows."""
class MockFlowHandler(config_entries.ConfigFlow):
"""Define a mock flow handler."""
VERSION = 1
with patch.dict(config_entries.HANDLERS, {"comp": MockFlowHandler}):
yield
class TestSetup:
"""Test the bootstrap utils."""
hass = None
backup_cache = None
# pylint: disable=invalid-name, no-self-use
def setup_method(self, method):
"""Set up the test."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Clean up."""
self.hass.stop()
def test_validate_component_config(self):
"""Test validating component configuration."""
config_schema = vol.Schema({"comp_conf": {"hello": str}}, required=True)
mock_integration(
self.hass, MockModule("comp_conf", config_schema=config_schema)
)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, "comp_conf", {})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(
self.hass, "comp_conf", {"comp_conf": None}
)
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, "comp_conf", {"comp_conf": {}})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(
self.hass,
"comp_conf",
{"comp_conf": {"hello": "world", "invalid": "extra"}},
)
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(1):
assert setup.setup_component(
self.hass, "comp_conf", {"comp_conf": {"hello": "world"}}
)
def test_validate_platform_config(self, caplog):
"""Test validating platform configuration."""
platform_schema = PLATFORM_SCHEMA.extend({"hello": str})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({})
mock_integration(
self.hass,
MockModule("platform_conf", platform_schema_base=platform_schema_base),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform(platform_schema=platform_schema),
)
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": {"platform": "not_existing", "hello": "world"}},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": {"platform": "whatever", "hello": "world"}},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": [{"platform": "whatever", "hello": "world"}]},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
# Any falsey platform config will be ignored (None, {}, etc)
with assert_setup_component(0) as config:
assert setup.setup_component(
self.hass, "platform_conf", {"platform_conf": None}
)
assert "platform_conf" in self.hass.config.components
assert not config["platform_conf"] # empty
assert setup.setup_component(
self.hass, "platform_conf", {"platform_conf": {}}
)
assert "platform_conf" in self.hass.config.components
assert not config["platform_conf"] # empty
def test_validate_platform_config_2(self, caplog):
"""Test component PLATFORM_SCHEMA_BASE prio over PLATFORM_SCHEMA."""
platform_schema = PLATFORM_SCHEMA.extend({"hello": str})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({"hello": "world"})
mock_integration(
self.hass,
MockModule(
"platform_conf",
platform_schema=platform_schema,
platform_schema_base=platform_schema_base,
),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform("whatever", platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
# pass
"platform_conf": {"platform": "whatever", "hello": "world"},
# fail: key hello violates component platform_schema_base
"platform_conf 2": {"platform": "whatever", "hello": "there"},
},
)
def test_validate_platform_config_3(self, caplog):
"""Test fallback to component PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE.extend({"hello": str})
platform_schema = PLATFORM_SCHEMA.extend({"cheers": str, "hello": "world"})
mock_integration(
self.hass, MockModule("platform_conf", platform_schema=component_schema)
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform("whatever", platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
# pass
"platform_conf": {"platform": "whatever", "hello": "world"},
# fail: key hello violates component platform_schema
"platform_conf 2": {"platform": "whatever", "hello": "there"},
},
)
def test_validate_platform_config_4(self):
"""Test entity_namespace in PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE
platform_schema = PLATFORM_SCHEMA
mock_integration(
self.hass,
MockModule("platform_conf", platform_schema_base=component_schema),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform(platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
"platform_conf": {
# pass: entity_namespace accepted by PLATFORM_SCHEMA
"platform": "whatever",
"entity_namespace": "yummy",
}
},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
def test_component_not_found(self):
"""setup_component should not crash if component doesn't exist."""
assert setup.setup_component(self.hass, "non_existing", {}) is False
def test_component_not_double_initialized(self):
"""Test we do not set up a component twice."""
mock_setup = Mock(return_value=True)
mock_integration(self.hass, MockModule("comp", setup=mock_setup))
assert setup.setup_component(self.hass, "comp", {})
assert mock_setup.called
mock_setup.reset_mock()
assert setup.setup_component(self.hass, "comp", {})
assert not mock_setup.called
@patch("homeassistant.util.package.install_package", return_value=False)
def test_component_not_installed_if_requirement_fails(self, mock_install):
"""Component setup should fail if requirement can't install."""
self.hass.config.skip_pip = False
mock_integration(self.hass, MockModule("comp", requirements=["package==0.0.1"]))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_not_setup_twice_if_loaded_during_other_setup(self):
"""Test component setup while waiting for lock is not set up twice."""
result = []
async def async_setup(hass, config):
"""Tracking Setup."""
result.append(1)
mock_integration(self.hass, MockModule("comp", async_setup=async_setup))
def setup_component():
"""Set up the component."""
setup.setup_component(self.hass, "comp", {})
thread = threading.Thread(target=setup_component)
thread.start()
setup.setup_component(self.hass, "comp", {})
thread.join()
assert len(result) == 1
def test_component_not_setup_missing_dependencies(self):
"""Test we do not set up a component if not all dependencies loaded."""
deps = ["maybe_existing"]
mock_integration(self.hass, MockModule("comp", dependencies=deps))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(self.hass, MockModule("comp2", dependencies=deps))
mock_integration(self.hass, MockModule("maybe_existing"))
assert setup.setup_component(self.hass, "comp2", {})
def test_component_failing_setup(self):
"""Test component that fails setup."""
mock_integration(
self.hass, MockModule("comp", setup=lambda hass, config: False)
)
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_exception_setup(self):
"""Test component that raises exception during setup."""
def exception_setup(hass, config):
"""Raise exception."""
raise Exception("fail!")
mock_integration(self.hass, MockModule("comp", setup=exception_setup))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_setup_with_validation_and_dependency(self):
"""Test all config is passed to dependencies."""
def config_check_setup(hass, config):
"""Test that config is passed in."""
if config.get("comp_a", {}).get("valid", False):
return True
raise Exception(f"Config not passed in: {config}")
platform = MockPlatform()
mock_integration(self.hass, MockModule("comp_a", setup=config_check_setup))
mock_integration(
self.hass,
MockModule("platform_a", setup=config_check_setup, dependencies=["comp_a"]),
)
mock_entity_platform(self.hass, "switch.platform_a", platform)
setup.setup_component(
self.hass,
"switch",
{"comp_a": {"valid": True}, "switch": {"platform": "platform_a"}},
)
self.hass.block_till_done()
assert "comp_a" in self.hass.config.components
def test_platform_specific_config_validation(self):
"""Test platform that specifies config."""
platform_schema = PLATFORM_SCHEMA.extend(
{"valid": True}, extra=vol.PREVENT_EXTRA
)
mock_setup = Mock(spec_set=True)
mock_entity_platform(
self.hass,
"switch.platform_a",
MockPlatform(platform_schema=platform_schema, setup_platform=mock_setup),
)
with assert_setup_component(0, "switch"):
assert setup.setup_component(
self.hass,
"switch",
{"switch": {"platform": "platform_a", "invalid": True}},
)
self.hass.block_till_done()
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("switch")
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"switch",
{
"switch": {
"platform": "platform_a",
"valid": True,
"invalid_extra": True,
}
},
)
self.hass.block_till_done()
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("switch")
with assert_setup_component(1, "switch"):
assert setup.setup_component(
self.hass,
"switch",
{"switch": {"platform": "platform_a", "valid": True}},
)
self.hass.block_till_done()
assert mock_setup.call_count == 1
def test_disable_component_if_invalid_return(self):
"""Test disabling component if invalid return."""
mock_integration(
self.hass, MockModule("disabled_component", setup=lambda hass, config: None)
)
assert not setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass,
MockModule("disabled_component", setup=lambda hass, config: False),
)
assert not setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass, MockModule("disabled_component", setup=lambda hass, config: True)
)
assert setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" in self.hass.config.components
def test_all_work_done_before_start(self):
"""Test all init work done till start."""
call_order = []
async def component1_setup(hass, config):
"""Set up mock component."""
await discovery.async_discover(
hass, "test_component2", {}, "test_component2", {}
)
await discovery.async_discover(
hass, "test_component3", {}, "test_component3", {}
)
return True
def component_track_setup(hass, config):
"""Set up mock component."""
call_order.append(1)
return True
mock_integration(
self.hass, MockModule("test_component1", async_setup=component1_setup)
)
mock_integration(
self.hass, MockModule("test_component2", setup=component_track_setup)
)
mock_integration(
self.hass, MockModule("test_component3", setup=component_track_setup)
)
@callback
def track_start(event):
"""Track start event."""
call_order.append(2)
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, track_start)
self.hass.add_job(setup.async_setup_component(self.hass, "test_component1", {}))
self.hass.block_till_done()
self.hass.start()
assert call_order == [1, 1, 2]
async def test_component_warn_slow_setup(hass):
"""Warn we log when a component setup takes a long time."""
mock_integration(hass, MockModule("test_component1"))
with patch.object(hass.loop, "call_later") as mock_call:
result = await setup.async_setup_component(hass, "test_component1", {})
assert result
assert mock_call.called
assert len(mock_call.mock_calls) == 3
timeout, logger_method = mock_call.mock_calls[0][1][:2]
assert timeout == setup.SLOW_SETUP_WARNING
assert logger_method == setup._LOGGER.warning
assert mock_call().cancel.called
async def test_platform_no_warn_slow(hass):
"""Do not warn for long entity setup time."""
mock_integration(
hass, MockModule("test_component1", platform_schema=PLATFORM_SCHEMA)
)
with patch.object(hass.loop, "call_later") as mock_call:
result = await setup.async_setup_component(hass, "test_component1", {})
assert result
assert len(mock_call.mock_calls) == 0
async def test_platform_error_slow_setup(hass, caplog):
"""Don't block startup more than SLOW_SETUP_MAX_WAIT."""
with patch.object(setup, "SLOW_SETUP_MAX_WAIT", 1):
called = []
async def async_setup(*args):
"""Tracking Setup."""
called.append(1)
await asyncio.sleep(2)
mock_integration(hass, MockModule("test_component1", async_setup=async_setup))
result = await setup.async_setup_component(hass, "test_component1", {})
assert len(called) == 1
assert not result
assert "test_component1 is taking longer than 1 seconds" in caplog.text
async def test_when_setup_already_loaded(hass):
"""Test when setup."""
calls = []
async def mock_callback(hass, component):
"""Mock callback."""
calls.append(component)
setup.async_when_setup(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == []
hass.config.components.add("test")
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Event listener should be gone
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Should be called right away
setup.async_when_setup(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == ["test", "test"]
async def test_async_when_setup_or_start_already_loaded(hass):
"""Test when setup or start."""
calls = []
async def mock_callback(hass, component):
"""Mock callback."""
calls.append(component)
setup.async_when_setup_or_start(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == []
hass.config.components.add("test")
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Event listener should be gone
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Should be called right away
setup.async_when_setup_or_start(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == ["test", "test"]
setup.async_when_setup_or_start(hass, "not_loaded", mock_callback)
await hass.async_block_till_done()
assert calls == ["test", "test"]
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert calls == ["test", "test", "not_loaded"]
async def test_setup_import_blows_up(hass):
"""Test that we handle it correctly when importing integration blows up."""
with patch(
"homeassistant.loader.Integration.get_component", side_effect=ValueError
):
assert not await setup.async_setup_component(hass, "sun", {})
async def test_parallel_entry_setup(hass):
"""Test config entries are set up in parallel."""
MockConfigEntry(domain="comp", data={"value": 1}).add_to_hass(hass)
MockConfigEntry(domain="comp", data={"value": 2}).add_to_hass(hass)
calls = []
async def mock_async_setup_entry(hass, entry):
"""Mock setting up an entry."""
calls.append(entry.data["value"])
await asyncio.sleep(0)
calls.append(entry.data["value"])
return True
mock_integration(
hass,
MockModule(
"comp",
async_setup_entry=mock_async_setup_entry,
),
)
mock_entity_platform(hass, "config_flow.comp", None)
await setup.async_setup_component(hass, "comp", {})
assert calls == [1, 2, 1, 2]
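# Minimal standalone sketch (illustrative, not a Home Assistant test) of why
# the assertion above sees [1, 2, 1, 2]: both entries are scheduled together
# and each one yields to the other at `await asyncio.sleep(0)` after its
# first append.
async def _interleave_demo():
    calls = []

    async def entry(value):
        calls.append(value)
        await asyncio.sleep(0)  # hand control back to the event loop
        calls.append(value)

    await asyncio.gather(entry(1), entry(2))
    assert calls == [1, 2, 1, 2]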
async def test_integration_disabled(hass, caplog):
"""Test we can disable an integration."""
disabled_reason = "Dependency contains code that breaks Safegate Pro"
mock_integration(
hass,
MockModule("test_component1", partial_manifest={"disabled": disabled_reason}),
)
result = await setup.async_setup_component(hass, "test_component1", {})
assert not result
assert disabled_reason in caplog.text
async def test_async_get_loaded_integrations(hass):
"""Test we can enumerate loaded integations."""
hass.config.components.add("notbase")
hass.config.components.add("switch")
hass.config.components.add("notbase.switch")
hass.config.components.add("myintegration")
hass.config.components.add("device_tracker")
hass.config.components.add("device_tracker.other")
hass.config.components.add("myintegration.light")
assert setup.async_get_loaded_integrations(hass) == {
"other",
"switch",
"notbase",
"myintegration",
"device_tracker",
}
async def test_integration_no_setup(hass, caplog):
"""Test we fail integration setup without setup functions."""
mock_integration(
hass,
MockModule("test_integration_without_setup", setup=False),
)
result = await setup.async_setup_component(
hass, "test_integration_without_setup", {}
)
assert not result
assert "No setup or config entry setup function defined" in caplog.text
async def test_integration_only_setup_entry(hass):
"""Test we have an integration with only a setup entry method."""
mock_integration(
hass,
MockModule(
"test_integration_only_entry",
setup=False,
async_setup_entry=AsyncMock(return_value=True),
),
)
assert await setup.async_setup_component(hass, "test_integration_only_entry", {})
async def test_async_start_setup(hass):
"""Test setup started context manager keeps track of setup times."""
with setup.async_start_setup(hass, ["august"]):
assert isinstance(
hass.data[setup.DATA_SETUP_STARTED]["august"], datetime.datetime
)
with setup.async_start_setup(hass, ["august"]):
assert isinstance(
hass.data[setup.DATA_SETUP_STARTED]["august_2"], datetime.datetime
)
assert "august" not in hass.data[setup.DATA_SETUP_STARTED]
assert isinstance(hass.data[setup.DATA_SETUP_TIME]["august"], datetime.timedelta)
assert "august_2" not in hass.data[setup.DATA_SETUP_TIME]
async def test_async_start_setup_platforms(hass):
"""Test setup started context manager keeps track of setup times for platforms."""
with setup.async_start_setup(hass, ["sensor.august"]):
assert isinstance(
hass.data[setup.DATA_SETUP_STARTED]["sensor.august"], datetime.datetime
)
assert "august" not in hass.data[setup.DATA_SETUP_STARTED]
assert isinstance(hass.data[setup.DATA_SETUP_TIME]["august"], datetime.timedelta)
assert "sensor" not in hass.data[setup.DATA_SETUP_TIME]
|
threading_names_log.py | #
"""Using thread names in logs
"""
# end_pymotw_header
import logging
import threading
import time
def worker():
logging.debug("Starting")
time.sleep(0.2)
logging.debug("Exiting")
def my_service():
logging.debug("Starting")
time.sleep(0.3)
logging.debug("Exiting")
logging.basicConfig(
level=logging.DEBUG, format="[%(levelname)s] (%(threadName)-10s) %(message)s"
)
t = threading.Thread(name="my_service", target=my_service)
w = threading.Thread(name="worker", target=worker)
w2 = threading.Thread(target=worker) # use default name
w.start()
w2.start()
t.start()
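# Sample output (thread scheduling may reorder lines, and the unnamed thread's
# default name varies by Python version, e.g. "Thread-1 (worker)" on 3.10+):
#   [DEBUG] (worker    ) Starting
#   [DEBUG] (Thread-1  ) Starting
#   [DEBUG] (my_service) Starting
#   [DEBUG] (worker    ) Exiting
#   [DEBUG] (Thread-1  ) Exiting
#   [DEBUG] (my_service) Exiting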
|
test_wraith.py | """Use wraith to compare current version against published docs.
"""
import unittest
import os
import copy
import re
import yaml
import shlex
import subprocess
import contextlib
from distutils.version import LooseVersion
import http.server
import socketserver
import threading
REFERENCE_URL = "https://www.cgat.org/downloads/public/CGATReport/documentation"
WRAITH_WORKDIR = os.path.abspath("wraith")
TEST_PORT=9100
TEST_HOST="localhost"
spider_config_template = """
browser: "phantomjs"
domains:
test: http://{test_host}:{test_port}
spider_skips:
- !ruby/regexp /static$/
- !ruby/regexp /%23/
- !ruby/regexp /.eps$/
- !ruby/regexp /.svg$/
- !ruby/regexp /.xlsx$/
- !ruby/regexp /notebook/
- !ruby/regexp /code/
directory: 'shots'
imports: "{wraith_data_config}"
phantomjs_options: '--ignore-ssl-errors=true --ssl-protocol=tlsv1'
"""
capture_config_template = """
browser: "phantomjs"
domains:
test: http://{test_host}:{test_port}
current: {reference_url}
spider_skips:
- !ruby/regexp /static$/
- !ruby/regexp /%23/
imports: "{wraith_data_config}"
screen_widths:
- 1280
directory: 'shots'
fuzz: '20%'
threshold: 5
gallery:
thumb_width: 200
thumb_height: 200
mode: diffs_only
phantomjs_options: '--ignore-ssl-errors=true --ssl-protocol=tlsv1'
"""
@contextlib.contextmanager
def changedir(path):
save_dir = os.path.abspath(os.getcwd())
os.chdir(path)
try:
yield
finally:
os.chdir(save_dir)
def run_server():
run("python -m http.server {} >& server.log".format(TEST_PORT))
@contextlib.contextmanager
def start_server(workdir):
handler = http.server.SimpleHTTPRequestHandler
with changedir(workdir):
# thread = threading.Thread(target=run_server)
# thread.start()
print("yielding")
yield
print("back from yield")
def run(statement,
return_stdout=False,
return_popen=False,
**kwargs):
'''execute a command line statement.
By default this method returns the code returned by the executed
command. If *return_stdout* is True, the contents of stdout are
returned as a file object. If *return_popen*, the Popen object is
returned.
``kwargs`` are passed on to subprocess.call,
subprocess.check_output or subprocess.Popen.
Raises
------
OSError
If process failed or was terminated.
'''
# remove new lines
statement = " ".join(re.sub("\t+", " ", statement).split("\n")).strip()
print(statement)
if "<(" in statement:
shell = os.environ.get('SHELL', "/bin/bash")
if "bash" not in shell:
raise ValueError(
"require bash for advanced shell syntax: <()")
# pipes.quote is deprecated; shlex.quote is the Py3 replacement
statement = "%s -c %s" % (shell, shlex.quote(statement))
if return_stdout:
return subprocess.check_output(statement, shell=True, **kwargs).decode("utf-8")
elif return_popen:
return subprocess.Popen(statement, shell=True, **kwargs)
else:
retcode = subprocess.call(statement, shell=True, **kwargs)
if retcode < 0:
raise OSError("process was terminated by signal %i" % -retcode)
return retcode
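# Usage sketch for run() above (illustrative commands, not part of the suite):
#   run("make docs")                              -> exit code as int
#   run("ruby --version", return_stdout=True)     -> "ruby 2.x ..." as str
#   p = run("wraith spider cfg.yml", return_popen=True); p.wait()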
def check_version(cmd, regex, min_version):
version_txt = run(cmd, return_stdout=True)
version = re.search(regex, version_txt).groups()[0]
if LooseVersion(version) < LooseVersion(min_version):
raise ValueError("version check failed: {} < {}, '{}'".format(
version, min_version, cmd))
return version
class TestWraith(unittest.TestCase):
def setUp(self):
source_dir = os.path.join(
os.path.dirname(os.path.dirname(
os.path.abspath(__file__))),
"doc", "_build", "html")
# check if npm is installed
npm_version = check_version("npm --version", r"(\S+)", "3.10")
# check if phantomjs is installed
phantomjs_version = check_version("npm list -g | grep phantom",
r"phantomjs@(\S+)",
"2.1")
ruby_version = check_version("ruby --version",
r"ruby (\S+)",
"2.1")
wraith_version = check_version(
"gem list | grep wraith",
r"wraith \((\S+)\)",
"4.0.1")
# get gem info
gem_data = yaml.safe_load(run("gem environment", return_stdout=True))
gem_paths = []
for record in gem_data["RubyGems Environment"]:
for key, value in record.items():
if key == "GEM PATHS":
gem_paths.extend(value)
break
if not gem_paths:
raise ValueError("could not find GEM PATHS in gem environment")
filenames = [os.path.join(path,
"gems/wraith-{}/lib/wraith/spider.rb".format(wraith_version))
for path in gem_paths]
if sum([os.path.exists(fn) for fn in filenames]) == 0:
raise ValueError("could not find file spider.rb to patch in {}".format(filenames))
for fn in filenames:
if not os.path.exists(fn):
continue
with open(fn) as inf:
data = inf.read()
if "path.downcase" in data:
with open(fn, "w") as outf:
outf.write(re.sub("path.downcase", "path", data))
# crawl new docs to collect documents to test
config_dir = os.path.abspath(os.path.join(WRAITH_WORKDIR, "config"))
wraith_spider_config = os.path.join(config_dir, "wraith_spider.yml")
wraith_capture_config = os.path.join(config_dir, "wraith_capture.yml")
wraith_data_config = os.path.join(config_dir, "wraith_data.yml")
if not os.path.exists(config_dir):
os.makedirs(config_dir)
if not os.path.exists(wraith_spider_config):
# do not crawl with reference, as crawler follows external links
spider_config = spider_config_template.format(
wraith_data_config=os.path.basename(wraith_data_config),
test_host=TEST_HOST,
test_port=TEST_PORT)
with open(wraith_spider_config, "w") as outf:
outf.write(spider_config)
if not os.path.exists(wraith_data_config):
with start_server(source_dir) as server:
run("cd {} && wraith spider {}".format(WRAITH_WORKDIR, wraith_spider_config))
if not os.path.exists(wraith_capture_config):
# do not crawl with reference, as crawler follows external links
capture_config = capture_config_template.format(
wraith_data_config=os.path.basename(wraith_data_config),
reference_url=REFERENCE_URL,
test_host=TEST_HOST,
test_port=TEST_PORT)
with open(wraith_capture_config, "w") as outf:
outf.write(capture_config)
self.wraith_capture_config = wraith_capture_config
self.source_dir = source_dir
def test_against_reference(self):
with start_server(self.source_dir) as server:
run("cd {} && wraith capture {}".format(WRAITH_WORKDIR,
self.wraith_capture_config))
if __name__ == "__main__":
unittest.main()
|
test_node.py | import os
import sys
import logging
import requests
import time
import traceback
import random
import pytest
import ray
import threading
from datetime import datetime, timedelta
from ray.cluster_utils import Cluster
from ray.dashboard.modules.node.node_consts import (LOG_PRUNE_THREASHOLD,
MAX_LOGS_TO_CACHE)
from ray.dashboard.tests.conftest import * # noqa
from ray._private.test_utils import (
format_web_url, wait_until_server_available, wait_for_condition,
wait_until_succeeded_without_exception)
logger = logging.getLogger(__name__)
def test_nodes_update(enable_test_module, ray_start_with_dashboard):
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])
is True)
webui_url = ray_start_with_dashboard["webui_url"]
webui_url = format_web_url(webui_url)
timeout_seconds = 10
start_time = time.time()
while True:
time.sleep(1)
try:
response = requests.get(webui_url + "/test/dump")
response.raise_for_status()
try:
dump_info = response.json()
except Exception as ex:
logger.info("failed response: %s", response.text)
raise ex
assert dump_info["result"] is True
dump_data = dump_info["data"]
assert len(dump_data["nodes"]) == 1
assert len(dump_data["agents"]) == 1
assert len(dump_data["nodeIdToIp"]) == 1
assert len(dump_data["nodeIdToHostname"]) == 1
assert dump_data["nodes"].keys() == dump_data[
"nodeIdToHostname"].keys()
response = requests.get(webui_url + "/test/notified_agents")
response.raise_for_status()
try:
notified_agents = response.json()
except Exception as ex:
logger.info("failed response: %s", response.text)
raise ex
assert notified_agents["result"] is True
notified_agents = notified_agents["data"]
assert len(notified_agents) == 1
assert notified_agents == dump_data["agents"]
break
except (AssertionError, requests.exceptions.ConnectionError) as e:
logger.info("Retry because of %s", e)
finally:
if time.time() > start_time + timeout_seconds:
raise Exception("Timed out while testing.")
def test_node_info(disable_aiohttp_cache, ray_start_with_dashboard):
@ray.remote
class Actor:
def getpid(self):
print(f"actor pid={os.getpid()}")
return os.getpid()
actors = [Actor.remote(), Actor.remote()]
actor_pids = [actor.getpid.remote() for actor in actors]
actor_pids = set(ray.get(actor_pids))
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])
is True)
webui_url = ray_start_with_dashboard["webui_url"]
webui_url = format_web_url(webui_url)
node_id = ray_start_with_dashboard["node_id"]
timeout_seconds = 10
start_time = time.time()
last_ex = None
while True:
time.sleep(1)
try:
response = requests.get(webui_url + "/nodes?view=hostnamelist")
response.raise_for_status()
hostname_list = response.json()
assert hostname_list["result"] is True, hostname_list["msg"]
hostname_list = hostname_list["data"]["hostNameList"]
assert len(hostname_list) == 1
hostname = hostname_list[0]
response = requests.get(webui_url + f"/nodes/{node_id}")
response.raise_for_status()
detail = response.json()
assert detail["result"] is True, detail["msg"]
detail = detail["data"]["detail"]
assert detail["hostname"] == hostname
assert detail["raylet"]["state"] == "ALIVE"
assert "raylet" in detail["cmdline"][0]
assert len(detail["workers"]) >= 2
assert len(detail["actors"]) == 2, detail["actors"]
assert len(detail["raylet"]["viewData"]) > 0
actor_worker_pids = set()
for worker in detail["workers"]:
if "ray::Actor" in worker["cmdline"][0]:
actor_worker_pids.add(worker["pid"])
assert actor_worker_pids == actor_pids
response = requests.get(webui_url + "/nodes?view=summary")
response.raise_for_status()
summary = response.json()
assert summary["result"] is True, summary["msg"]
assert len(summary["data"]["summary"]) == 1
summary = summary["data"]["summary"][0]
assert summary["hostname"] == hostname
assert summary["raylet"]["state"] == "ALIVE"
assert "raylet" in summary["cmdline"][0]
assert "workers" not in summary
assert "actors" not in summary
assert "viewData" not in summary["raylet"]
assert "objectStoreAvailableMemory" in summary["raylet"]
assert "objectStoreUsedMemory" in summary["raylet"]
break
except Exception as ex:
last_ex = ex
finally:
if time.time() > start_time + timeout_seconds:
ex_stack = traceback.format_exception(
type(last_ex), last_ex,
last_ex.__traceback__) if last_ex else []
ex_stack = "".join(ex_stack)
raise Exception(f"Timed out while testing, {ex_stack}")
def test_memory_table(disable_aiohttp_cache, ray_start_with_dashboard):
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"]))
@ray.remote
class ActorWithObjs:
def __init__(self):
self.obj_ref = ray.put([1, 2, 3])
def get_obj(self):
return ray.get(self.obj_ref)
my_obj = ray.put([1, 2, 3] * 100) # noqa
actors = [ActorWithObjs.remote() for _ in range(2)] # noqa
results = ray.get([actor.get_obj.remote() for actor in actors]) # noqa
webui_url = format_web_url(ray_start_with_dashboard["webui_url"])
resp = requests.get(
webui_url + "/memory/set_fetch", params={"shouldFetch": "true"})
resp.raise_for_status()
def check_mem_table():
resp = requests.get(f"{webui_url}/memory/memory_table")
resp_data = resp.json()
assert resp_data["result"]
latest_memory_table = resp_data["data"]["memoryTable"]
summary = latest_memory_table["summary"]
# 1 ref per handle and per object the actor has a ref to
assert summary["totalActorHandles"] == len(actors) * 2
# 1 ref for my_obj
assert summary["totalLocalRefCount"] == 1
wait_until_succeeded_without_exception(
check_mem_table, (AssertionError, ), timeout_ms=1000)
def test_get_all_node_details(disable_aiohttp_cache, ray_start_with_dashboard):
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"]))
webui_url = format_web_url(ray_start_with_dashboard["webui_url"])
@ray.remote
class ActorWithObjs:
def __init__(self):
print("I also log a line")
self.obj_ref = ray.put([1, 2, 3])
def get_obj(self):
return ray.get(self.obj_ref)
actors = [ActorWithObjs.remote() for _ in range(2)] # noqa
timeout_seconds = 20
start_time = time.time()
last_ex = None
def check_node_details():
resp = requests.get(f"{webui_url}/nodes?view=details")
resp_json = resp.json()
resp_data = resp_json["data"]
clients = resp_data["clients"]
node = clients[0]
assert len(clients) == 1
assert len(node.get("actors")) == 2
# Workers information should be in the detailed payload
assert "workers" in node
assert "logCount" in node
# Two lines printed by ActorWithObjs
assert node["logCount"] >= 2
print(node["workers"])
assert len(node["workers"]) == 2
assert node["workers"][0]["logCount"] == 1
while True:
time.sleep(1)
try:
check_node_details()
break
except (AssertionError, KeyError, IndexError) as ex:
last_ex = ex
finally:
if time.time() > start_time + timeout_seconds:
ex_stack = traceback.format_exception(
type(last_ex), last_ex,
last_ex.__traceback__) if last_ex else []
ex_stack = "".join(ex_stack)
raise Exception(f"Timed out while testing, {ex_stack}")
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_multi_nodes_info(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
cluster: Cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
cluster.add_node()
cluster.add_node()
def _check_nodes():
try:
response = requests.get(webui_url + "/nodes?view=summary")
response.raise_for_status()
summary = response.json()
assert summary["result"] is True, summary["msg"]
summary = summary["data"]["summary"]
assert len(summary) == 3
for node_info in summary:
node_id = node_info["raylet"]["nodeId"]
response = requests.get(webui_url + f"/nodes/{node_id}")
response.raise_for_status()
detail = response.json()
assert detail["result"] is True, detail["msg"]
detail = detail["data"]["detail"]
assert detail["raylet"]["state"] == "ALIVE"
response = requests.get(webui_url + "/test/dump?key=agents")
response.raise_for_status()
agents = response.json()
assert len(agents["data"]["agents"]) == 3
return True
except Exception as ex:
logger.info(ex)
return False
wait_for_condition(_check_nodes, timeout=15)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_multi_node_churn(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
cluster: Cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = format_web_url(cluster.webui_url)
def cluster_chaos_monkey():
worker_nodes = []
while True:
time.sleep(5)
if len(worker_nodes) < 2:
worker_nodes.append(cluster.add_node())
continue
should_add_node = random.randint(0, 1)
if should_add_node:
worker_nodes.append(cluster.add_node())
else:
node_index = random.randrange(0, len(worker_nodes))
node_to_remove = worker_nodes.pop(node_index)
cluster.remove_node(node_to_remove)
def get_index():
resp = requests.get(webui_url)
resp.raise_for_status()
def get_nodes():
resp = requests.get(webui_url + "/nodes?view=summary")
resp.raise_for_status()
summary = resp.json()
assert summary["result"] is True, summary["msg"]
assert summary["data"]["summary"]
t = threading.Thread(target=cluster_chaos_monkey, daemon=True)
t.start()
t_st = datetime.now()
duration = timedelta(seconds=60)
while datetime.now() < t_st + duration:
get_index()
time.sleep(2)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_logs(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
nodes = ray.nodes()
assert len(nodes) == 1
node_ip = nodes[0]["NodeManagerAddress"]
@ray.remote
class LoggingActor:
def go(self, n):
i = 0
while i < n:
print(f"On number {i}")
i += 1
def get_pid(self):
return os.getpid()
la = LoggingActor.remote()
la2 = LoggingActor.remote()
la_pid = str(ray.get(la.get_pid.remote()))
la2_pid = str(ray.get(la2.get_pid.remote()))
ray.get(la.go.remote(4))
ray.get(la2.go.remote(1))
def check_logs():
node_logs_response = requests.get(
f"{webui_url}/node_logs", params={"ip": node_ip})
node_logs_response.raise_for_status()
node_logs = node_logs_response.json()
assert node_logs["result"]
assert type(node_logs["data"]["logs"]) is dict
assert all(
pid in node_logs["data"]["logs"] for pid in (la_pid, la2_pid))
assert len(node_logs["data"]["logs"][la2_pid]) == 1
actor_one_logs_response = requests.get(
f"{webui_url}/node_logs",
params={
"ip": node_ip,
"pid": str(la_pid)
})
actor_one_logs_response.raise_for_status()
actor_one_logs = actor_one_logs_response.json()
assert actor_one_logs["result"]
assert type(actor_one_logs["data"]["logs"]) is dict
assert len(actor_one_logs["data"]["logs"][la_pid]) == 4
assert wait_until_succeeded_without_exception(
check_logs, (AssertionError, ), timeout_ms=1000)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_logs_clean_up(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
"""Check if logs from the dead pids are GC'ed.
"""
cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
nodes = ray.nodes()
assert len(nodes) == 1
node_ip = nodes[0]["NodeManagerAddress"]
@ray.remote
class LoggingActor:
def go(self, n):
i = 0
while i < n:
print(f"On number {i}")
i += 1
def get_pid(self):
return os.getpid()
la = LoggingActor.remote()
la_pid = str(ray.get(la.get_pid.remote()))
ray.get(la.go.remote(1))
def check_logs():
node_logs_response = requests.get(
f"{webui_url}/node_logs", params={"ip": node_ip})
node_logs_response.raise_for_status()
node_logs = node_logs_response.json()
assert node_logs["result"]
assert la_pid in node_logs["data"]["logs"]
assert wait_until_succeeded_without_exception(
check_logs, (AssertionError, ), timeout_ms=1000)
ray.kill(la)
def check_logs_not_exist():
node_logs_response = requests.get(
f"{webui_url}/node_logs", params={"ip": node_ip})
node_logs_response.raise_for_status()
node_logs = node_logs_response.json()
assert node_logs["result"]
assert la_pid not in node_logs["data"]["logs"]
assert wait_until_succeeded_without_exception(
check_logs_not_exist, (AssertionError, ), timeout_ms=10000)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_logs_max_count(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
"""Test that each Ray worker cannot cache more than 1000 logs at a time.
"""
cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
nodes = ray.nodes()
assert len(nodes) == 1
node_ip = nodes[0]["NodeManagerAddress"]
@ray.remote
class LoggingActor:
def go(self, n):
i = 0
while i < n:
print(f"On number {i}")
i += 1
def get_pid(self):
return os.getpid()
la = LoggingActor.remote()
la_pid = str(ray.get(la.get_pid.remote()))
ray.get(la.go.remote(MAX_LOGS_TO_CACHE * LOG_PRUNE_THREASHOLD))
def check_logs():
node_logs_response = requests.get(
f"{webui_url}/node_logs", params={"ip": node_ip})
node_logs_response.raise_for_status()
node_logs = node_logs_response.json()
assert node_logs["result"]
assert type(node_logs["data"]["logs"]) is dict
assert la_pid in node_logs["data"]["logs"]
log_lengths = len(node_logs["data"]["logs"][la_pid])
assert log_lengths >= MAX_LOGS_TO_CACHE
assert log_lengths <= MAX_LOGS_TO_CACHE * LOG_PRUNE_THREASHOLD
actor_one_logs_response = requests.get(
f"{webui_url}/node_logs",
params={
"ip": node_ip,
"pid": str(la_pid)
})
actor_one_logs_response.raise_for_status()
actor_one_logs = actor_one_logs_response.json()
assert actor_one_logs["result"]
assert type(actor_one_logs["data"]["logs"]) is dict
log_lengths = len(actor_one_logs["data"]["logs"][la_pid])
assert log_lengths >= MAX_LOGS_TO_CACHE
assert log_lengths <= MAX_LOGS_TO_CACHE * LOG_PRUNE_THREASHOLD
assert wait_until_succeeded_without_exception(
check_logs, (AssertionError, ), timeout_ms=10000)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_errors(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
nodes = ray.nodes()
assert len(nodes) == 1
node_ip = nodes[0]["NodeManagerAddress"]
@ray.remote
class ErrorActor():
def go(self):
raise ValueError("This is an error")
def get_pid(self):
return os.getpid()
ea = ErrorActor.remote()
ea_pid = str(ray.get(ea.get_pid.remote()))  # resolve to the pid string used as a dict key below
ea.go.remote()
def check_errs():
node_errs_response = requests.get(
f"{webui_url}/node_logs", params={"ip": node_ip})
node_errs_response.raise_for_status()
node_errs = node_errs_response.json()
assert node_errs["result"]
assert "errors" in node_errs["data"]
assert type(node_errs["data"]["errors"]) is dict
assert ea_pid in node_errs["data"]["errors"]
assert len(node_errs["data"]["errors"][ea_pid]) == 1
actor_err_response = requests.get(
f"{webui_url}/node_logs",
params={
"ip": node_ip,
"pid": str(ea_pid)
})
actor_err_response.raise_for_status()
actor_errs = actor_err_response.json()
assert actor_errs["result"]
assert type(actor_errs["data"]["errors"]) is dict
assert len(actor_errs["data"]["errors"][ea_pid]) == 4
assert wait_until_succeeded_without_exception(
check_errs, (AssertionError, ), timeout_ms=1000)
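# The checks in this file poll the dashboard through a retry helper imported
# from Ray's test utilities. A minimal sketch of such a helper (a hypothetical
# stand-in for illustration, not Ray's actual implementation) could look like:
def _retry_until_no_exception(fn, exceptions, timeout_ms=1000):
    """Call fn repeatedly until it stops raising `exceptions` or time runs out."""
    import time
    deadline = time.monotonic() + timeout_ms / 1000.0
    while time.monotonic() < deadline:
        try:
            fn()
            return True
        except exceptions:
            time.sleep(0.1)  # brief back-off before the next attempt
    return False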
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
parallel_unittest.py | # -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for parallel library."""
from __future__ import print_function
import contextlib
import multiprocessing
import numbers
import os
import signal
import sys
import tempfile
import threading
import time
import unittest
import mock
from six.moves import cPickle as pickle
from six.moves import queue as Queue
from chromite.lib import cros_logging as logging
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import partial_mock
from chromite.lib import timeout_util
# pylint: disable=protected-access
_BUFSIZE = 10 ** 4
_EXIT_TIMEOUT = 30
_NUM_WRITES = 100
_NUM_THREADS = 50
_TOTAL_BYTES = _NUM_THREADS * _NUM_WRITES * _BUFSIZE
_GREETING = 'hello world'
_SKIP_FLAKY_TESTS = True
class FakeMultiprocessManager(object):
"""A fake implementation of the multiprocess manager.
This is only intended for use with ParallelMock.
"""
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
return None
def Queue(self):
return Queue.Queue()
def RLock(self):
return threading.RLock()
def dict(self, *args, **kwargs):
return dict(*args, **kwargs)
def list(self, *args, **kwargs):
return list(*args, **kwargs)
class ParallelMock(partial_mock.PartialMock):
"""Run parallel steps in sequence for testing purposes.
This class updates chromite.lib.parallel to just run processes in
sequence instead of running them in parallel. This is useful for
testing.
"""
TARGET = 'chromite.lib.parallel._BackgroundTask'
ATTRS = ('ParallelTasks', 'TaskRunner')
def PreStart(self):
self.PatchObject(parallel, 'Manager', side_effect=FakeMultiprocessManager)
partial_mock.PartialMock.PreStart(self)
@contextlib.contextmanager
def ParallelTasks(self, steps, max_parallel=None, halt_on_error=False):
assert max_parallel is None or isinstance(max_parallel, numbers.Integral)
assert isinstance(halt_on_error, bool)
try:
yield
finally:
for step in steps:
step()
def TaskRunner(self, queue, task, onexit=None, task_args=None,
task_kwargs=None):
# Setup of these matches the original code.
if task_args is None:
task_args = []
elif not isinstance(task_args, list):
task_args = list(task_args)
if task_kwargs is None:
task_kwargs = {}
try:
while True:
# Wait for a new item to show up on the queue. This is a blocking wait,
# so if there's nothing to do, we just sit here.
x = queue.get()
if isinstance(x, parallel._AllTasksComplete):
# All tasks are complete, so we should exit.
break
x = task_args + list(x)
task(*x, **task_kwargs)
finally:
if onexit:
onexit()
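# TaskRunner above mirrors the real consumer loop in chromite.lib.parallel:
# workers block on queue.get() and stop once a sentinel object arrives. A
# standalone sketch of that sentinel-terminated queue pattern (names here are
# illustrative, not chromite's):
def _drain_until_sentinel(work_queue, sentinel, handle):
    """Consume items from work_queue, applying handle(), until sentinel arrives."""
    while True:
        item = work_queue.get()  # blocking wait; if there is no work, just sit here
        if item is sentinel:
            break
        handle(item)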
class BackgroundTaskVerifier(partial_mock.PartialMock):
"""Verify that queues are empty after BackgroundTaskRunner runs.
BackgroundTaskRunner should always empty its input queues, even if an
exception occurs. This is important for preventing a deadlock in the case
where a thread fails partway through (e.g. user presses Ctrl-C before all
input can be processed).
"""
TARGET = 'chromite.lib.parallel'
ATTRS = ('BackgroundTaskRunner',)
@contextlib.contextmanager
def BackgroundTaskRunner(self, task, *args, **kwargs):
queue = kwargs.setdefault('queue', multiprocessing.Queue())
args = [task] + list(args)
try:
with self.backup['BackgroundTaskRunner'](*args, **kwargs):
yield queue
finally:
try:
queue.get(False)
except Queue.Empty:
pass
else:
raise AssertionError('Expected empty queue after BackgroundTaskRunner')
class TestManager(cros_test_lib.TestCase):
"""Test parallel.Manager()."""
def testSigint(self):
"""Tests that parallel.Manager() ignores SIGINT."""
with parallel.Manager() as manager:
queue = manager.Queue()
os.kill(manager._process.pid, signal.SIGINT)
with self.assertRaises(Queue.Empty):
queue.get(block=False)
def testSigterm(self):
"""Tests that parallel.Manager() ignores SIGTERM."""
with parallel.Manager() as manager:
queue = manager.Queue()
os.kill(manager._process.pid, signal.SIGTERM)
with self.assertRaises(Queue.Empty):
queue.get(block=False)
class TestBackgroundWrapper(cros_test_lib.TestCase):
"""Unittests for background wrapper."""
def tearDown(self):
# Wait for children to exit.
try:
timeout_util.WaitForReturnValue([[]], multiprocessing.active_children,
timeout=_EXIT_TIMEOUT)
except timeout_util.TimeoutError:
pass
# Complain if there are any children left over.
active_children = multiprocessing.active_children()
for child in active_children:
if hasattr(child, 'Kill'):
child.Kill(signal.SIGKILL, log_level=logging.WARNING)
child.join()
self.assertEqual(multiprocessing.active_children(), [])
self.assertEqual(active_children, [])
def wrapOutputTest(self, func):
# Set _PRINT_INTERVAL to a smaller number to make it easier to
# reproduce bugs.
with mock.patch.multiple(parallel._BackgroundTask, PRINT_INTERVAL=0.01):
with tempfile.NamedTemporaryFile() as temp:
with open(temp.name, 'w') as output:
with mock.patch.multiple(sys, stdout=output):
func()
return osutils.ReadFile(temp.name)
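# wrapOutputTest above points sys.stdout at a real file rather than an
# in-memory buffer, because file descriptors (unlike StringIO objects) are
# inherited by the forked background processes whose output we want to
# capture. A distilled sketch of that capture idiom (helper name is ours):
def _capture_stdout_to_file(func):
    """Run func with sys.stdout redirected to a temp file; return what it wrote."""
    with tempfile.NamedTemporaryFile() as temp:
        with open(temp.name, 'w') as output:
            with mock.patch.multiple(sys, stdout=output):
                func()
        return osutils.ReadFile(temp.name)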
class TestHelloWorld(TestBackgroundWrapper):
"""Test HelloWorld output in various background environments."""
def setUp(self):
self.printed_hello = multiprocessing.Event()
def _HelloWorld(self):
"""Write 'hello world' to stdout."""
sys.stdout.write('hello')
sys.stdout.flush()
sys.stdout.seek(0)
self.printed_hello.set()
# Wait for the parent process to read the output. Once the output
# has been read, try writing 'hello world' again, to be sure that
# rewritten output is not read twice.
time.sleep(parallel._BackgroundTask.PRINT_INTERVAL * 10)
sys.stdout.write(_GREETING)
sys.stdout.flush()
def _ParallelHelloWorld(self):
"""Write 'hello world' to stdout using multiple processes."""
with parallel.Manager() as manager:
queue = manager.Queue()
with parallel.BackgroundTaskRunner(self._HelloWorld, queue=queue):
queue.put([])
self.printed_hello.wait()
def VerifyDefaultQueue(self):
"""Verify that BackgroundTaskRunner will create a queue on it's own."""
with parallel.BackgroundTaskRunner(self._HelloWorld) as queue:
queue.put([])
self.printed_hello.wait()
def testParallelHelloWorld(self):
"""Test that output is not written multiple times when seeking."""
out = self.wrapOutputTest(self._ParallelHelloWorld)
self.assertEqual(out, _GREETING)
def testMultipleHelloWorlds(self):
"""Test that multiple threads can be created."""
parallel.RunParallelSteps([self.testParallelHelloWorld] * 2)
def testLongTempDirectory(self):
"""Test that we can handle a long temporary directory."""
with osutils.TempDir() as tempdir:
new_tempdir = os.path.join(tempdir, 'xxx/' * 100)
osutils.SafeMakedirs(new_tempdir)
old_tempdir, old_tempdir_env = osutils.SetGlobalTempDir(new_tempdir)
try:
self.testParallelHelloWorld()
finally:
osutils.SetGlobalTempDir(old_tempdir, old_tempdir_env)
def _BackgroundTaskRunnerArgs(results, arg1, arg2, kwarg1=None, kwarg2=None):
"""Helper for TestBackgroundTaskRunnerArgs
We specifically want a module function to test against and not a class member.
"""
results.put((arg1, arg2, kwarg1, kwarg2))
class TestBackgroundTaskRunnerArgs(TestBackgroundWrapper):
"""Unittests for BackgroundTaskRunner argument handling."""
def testArgs(self):
"""Test that we can pass args down to the task."""
with parallel.Manager() as manager:
results = manager.Queue()
arg2s = set((1, 2, 3))
with parallel.BackgroundTaskRunner(_BackgroundTaskRunnerArgs, results,
'arg1', kwarg1='kwarg1') as queue:
for arg2 in arg2s:
queue.put((arg2,))
# Since the queue is unordered, need to handle arg2 specially.
result_arg2s = set()
for _ in range(3):
result = results.get()
self.assertEqual(result[0], 'arg1')
result_arg2s.add(result[1])
self.assertEqual(result[2], 'kwarg1')
self.assertEqual(result[3], None)
self.assertEqual(arg2s, result_arg2s)
self.assertEqual(results.empty(), True)
class TestFastPrinting(TestBackgroundWrapper):
"""Stress tests for background sys.stdout handling."""
def _FastPrinter(self):
# Writing lots of output quickly often reproduces bugs in this module
# because it can trigger race conditions.
for _ in range(_NUM_WRITES - 1):
sys.stdout.write('x' * _BUFSIZE)
sys.stderr.write('x' * (_BUFSIZE - 1) + '\n')
def _ParallelPrinter(self):
parallel.RunParallelSteps([self._FastPrinter] * _NUM_THREADS)
def _NestedParallelPrinter(self):
parallel.RunParallelSteps([self._ParallelPrinter])
def testSimpleParallelPrinter(self):
out = self.wrapOutputTest(self._ParallelPrinter)
self.assertEqual(len(out), _TOTAL_BYTES)
def testNestedParallelPrinter(self):
"""Verify that no output is lost when lots of output is written."""
out = self.wrapOutputTest(self._NestedParallelPrinter)
self.assertEqual(len(out), _TOTAL_BYTES)
class TestRunParallelSteps(cros_test_lib.TestCase):
"""Tests for RunParallelSteps."""
def testReturnValues(self):
"""Test that we pass return values through when requested."""
def f1():
return 1
def f2():
return 2
def f3():
pass
return_values = parallel.RunParallelSteps([f1, f2, f3], return_values=True)
self.assertEqual(return_values, [1, 2, None])
def testLargeReturnValues(self):
"""Test that the managed queue prevents hanging on large return values."""
def f1():
return ret_value
ret_value = ''
for _ in range(10000):
ret_value += 'This will be repeated many times.\n'
return_values = parallel.RunParallelSteps([f1], return_values=True)
self.assertEqual(return_values, [ret_value])
class TestParallelMock(TestBackgroundWrapper):
"""Test the ParallelMock class."""
def setUp(self):
self._calls = 0
def _Callback(self):
self._calls += 1
return self._calls
def testRunParallelSteps(self):
"""Make sure RunParallelSteps is mocked out."""
with ParallelMock():
parallel.RunParallelSteps([self._Callback])
self.assertEqual(1, self._calls)
def testBackgroundTaskRunner(self):
"""Make sure BackgroundTaskRunner is mocked out."""
with ParallelMock():
parallel.RunTasksInProcessPool(self._Callback, [])
self.assertEqual(0, self._calls)
result = parallel.RunTasksInProcessPool(self._Callback, [[]])
self.assertEqual(1, self._calls)
self.assertEqual([1], result)
result = parallel.RunTasksInProcessPool(self._Callback, [], processes=9,
onexit=self._Callback)
self.assertEqual(10, self._calls)
self.assertEqual([], result)
result = parallel.RunTasksInProcessPool(self._Callback, [[]] * 10)
self.assertEqual(list(range(11, 21)), result)
class TestExceptions(cros_test_lib.MockOutputTestCase):
"""Test cases where child processes raise exceptions."""
def _SystemExit(self):
sys.stdout.write(_GREETING)
sys.exit(1)
def _KeyboardInterrupt(self):
sys.stdout.write(_GREETING)
raise KeyboardInterrupt()
def _BadPickler(self):
return self._BadPickler
class _TestException(Exception):
"""Custom exception for testing."""
def _VerifyExceptionRaised(self, fn, exc_type):
"""A helper function to verify the correct |exc_type| is raised."""
for task in (lambda: parallel.RunTasksInProcessPool(fn, [[]]),
lambda: parallel.RunParallelSteps([fn])):
output_str = ex_str = ex = None
with self.OutputCapturer() as capture:
with self.assertRaises(parallel.BackgroundFailure) as ex:
task()
output_str = capture.GetStdout()
ex_str = str(ex.exception)
self.assertTrue(exc_type in [x.type for x in ex.exception.exc_infos])
self.assertEqual(output_str, _GREETING)
self.assertTrue(str(exc_type) in ex_str)
def testExceptionRaising(self):
"""Tests the exceptions are raised correctly."""
self.StartPatcher(BackgroundTaskVerifier())
self._VerifyExceptionRaised(self._KeyboardInterrupt, KeyboardInterrupt)
self._VerifyExceptionRaised(self._SystemExit, SystemExit)
def testExceptionPriority(self):
"""Tests that foreground exceptions take priority over background."""
self.StartPatcher(BackgroundTaskVerifier())
with self.assertRaises(self._TestException):
with parallel.BackgroundTaskRunner(self._KeyboardInterrupt,
processes=1) as queue:
queue.put([])
raise self._TestException()
# We can't test for PickleError with Python 3.5+ due to bugs in Python.
@unittest.skipIf(sys.version_info >= (3, 5),
'https://bugs.python.org/issue29187')
def testFailedPickle(self):
"""PicklingError should be thrown when an argument fails to pickle."""
with self.assertRaises(pickle.PickleError):
parallel.RunTasksInProcessPool(self._SystemExit, [[self._SystemExit]])
def testFailedPickleOnReturn(self):
"""PicklingError should be thrown when a return value fails to pickle."""
with self.assertRaises(parallel.BackgroundFailure):
parallel.RunParallelSteps([self._BadPickler], return_values=True)
class _TestForegroundException(Exception):
"""An exception to be raised by the foreground process."""
class TestHalting(cros_test_lib.MockOutputTestCase, TestBackgroundWrapper):
"""Test that child processes are halted when exceptions occur."""
def setUp(self):
self.failed = multiprocessing.Event()
self.passed = multiprocessing.Event()
def _GetKillChildrenTimeout(self):
"""Return a timeout that is long enough for _BackgroundTask._KillChildren.
This unittest is not meant to restrict which signal succeeds in killing the
background process, so use a long enough timeout whenever asserting that the
background process is killed, leaving some buffer for slow builders.
"""
return (parallel._BackgroundTask.SIGTERM_TIMEOUT +
parallel._BackgroundTask.SIGKILL_TIMEOUT) + 30
def _Pass(self):
self.passed.set()
sys.stdout.write(_GREETING)
def _Exit(self):
sys.stdout.write(_GREETING)
self.passed.wait()
sys.exit(1)
def _Fail(self):
self.failed.wait(self._GetKillChildrenTimeout())
self.failed.set()
def _PassEventually(self):
self.passed.wait(self._GetKillChildrenTimeout())
self.passed.set()
@unittest.skipIf(_SKIP_FLAKY_TESTS, 'Occasionally fails.')
def testExceptionRaising(self):
"""Test that exceptions halt all running steps."""
steps = [self._Exit, self._Fail, self._Pass, self._Fail]
output_str, ex_str = None, None
with self.OutputCapturer() as capture:
try:
parallel.RunParallelSteps(steps, halt_on_error=True)
except parallel.BackgroundFailure as ex:
output_str = capture.GetStdout()
ex_str = str(ex)
logging.debug(ex_str)
self.assertTrue('Traceback' in ex_str)
self.assertTrue(self.passed.is_set())
self.assertEqual(output_str, _GREETING)
self.assertFalse(self.failed.is_set())
def testForegroundExceptionRaising(self):
"""Test that BackgroundTaskRunner halts tasks on a foreground exception."""
with self.assertRaises(_TestForegroundException):
with parallel.BackgroundTaskRunner(self._PassEventually,
processes=1,
halt_on_error=True) as queue:
queue.put([])
raise _TestForegroundException()
self.assertFalse(self.passed.is_set())
@unittest.skipIf(_SKIP_FLAKY_TESTS, 'Occasionally fails.')
def testTempFileCleanup(self):
"""Test that all temp files are cleaned up."""
with osutils.TempDir() as tempdir:
self.assertEqual(os.listdir(tempdir), [])
self.testExceptionRaising()
self.assertEqual(os.listdir(tempdir), [])
def testKillQuiet(self, steps=None, **kwargs):
"""Test that processes do get killed if they're silent for too long."""
if steps is None:
steps = [self._Fail] * 2
kwargs.setdefault('SILENT_TIMEOUT', 0.1)
kwargs.setdefault('MINIMUM_SILENT_TIMEOUT', 0.01)
kwargs.setdefault('SILENT_TIMEOUT_STEP', 0)
kwargs.setdefault('SIGTERM_TIMEOUT', 0.1)
kwargs.setdefault('PRINT_INTERVAL', 0.01)
kwargs.setdefault('GDB_COMMANDS', ('detach',))
ex_str = None
with mock.patch.multiple(parallel._BackgroundTask, **kwargs):
with self.OutputCapturer() as capture:
try:
with cros_test_lib.LoggingCapturer():
parallel.RunParallelSteps(steps)
except parallel.BackgroundFailure as ex:
ex_str = str(ex)
error_str = capture.GetStderr()
self.assertTrue('parallel_unittest.py' in error_str)
self.assertTrue(ex_str)
class TestConstants(cros_test_lib.TestCase):
"""Test values of constants."""
def testSilentTimeout(self):
"""Verify the silent timeout is small enough."""
# Enforce that the default timeout is less than 9000, the default timeout
# set in build/scripts/master/factory/chromeos_factory.py:ChromiteFactory
# in the Chrome buildbot source code.
self.assertLess(
parallel._BackgroundTask.SILENT_TIMEOUT, 9000,
'Do not increase this timeout. Instead, print regular progress '
'updates, so that buildbot (and cbuildbot) will know that your '
'program has not hung.')
class TestExitWithParent(cros_test_lib.TestCase):
"""Tests ExitWithParent."""
def testChildExits(self):
"""Create a child and a grandchild. The child should die with the parent."""
def GrandChild():
parallel.ExitWithParent()
time.sleep(9)
def Child(queue):
grand_child = multiprocessing.Process(target=GrandChild)
grand_child.start()
queue.put(grand_child.pid)
time.sleep(9)
with parallel.Manager() as manager:
q = manager.Queue()
child = multiprocessing.Process(target=lambda: Child(q))
child.start()
grand_child_pid = q.get(timeout=1)
# Before we kill the child, the grandchild should be running:
self.assertTrue(os.path.isdir('/proc/%d' % grand_child_pid))
os.kill(child.pid, signal.SIGKILL)
# (shortly) after we kill the child, the grandchild should kill itself.
# We can't use os.waitpid because the grandchild process is not a child
# process of ours. Just wait 20 seconds - this should be enough even if the
# machine is under load.
timeout_util.WaitForReturnTrue(
lambda: not os.path.isdir('/proc/%d' % grand_child_pid),
20,
period=0.05)
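# ExitWithParent, exercised above, is commonly implemented on Linux with
# prctl(PR_SET_PDEATHSIG, ...), which asks the kernel to signal the process
# when its parent dies. A minimal sketch of that mechanism (illustrative only;
# chromite's real implementation lives in chromite.lib.parallel and may
# handle more cases):
def _exit_with_parent_sketch():
    """Ask the kernel to SIGHUP this process when its parent exits (Linux-only)."""
    import ctypes
    import ctypes.util
    PR_SET_PDEATHSIG = 1  # from <sys/prctl.h>
    libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
    if libc.prctl(PR_SET_PDEATHSIG, signal.SIGHUP, 0, 0, 0) != 0:
        raise OSError(ctypes.get_errno(), 'prctl(PR_SET_PDEATHSIG) failed')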
def main(_argv):
cros_test_lib.main(level='info', module=__name__)
|
__init__.py | """
This package holds communication aspects
"""
import binascii
import json
import logging
import socket
import traceback
from abc import abstractmethod
from binascii import unhexlify
from threading import Thread
from pylgbst.constants import MSG_DEVICE_SHUTDOWN, ENABLE_NOTIFICATIONS_HANDLE, ENABLE_NOTIFICATIONS_VALUE
from pylgbst.utilities import str2hex
log = logging.getLogger('comms')
LEGO_MOVE_HUB = "LEGO Move Hub"
class Connection(object):
def connect(self, hub_mac=None):
pass
@abstractmethod
def is_alive(self):
pass
def disconnect(self):
pass
@abstractmethod
def write(self, handle, data):
pass
@abstractmethod
def set_notify_handler(self, handler):
pass
def enable_notifications(self):
log.debug('Connection: enable_notifications')
self.write(ENABLE_NOTIFICATIONS_HANDLE, ENABLE_NOTIFICATIONS_VALUE)
class DebugServer(object):
"""
Starts a TCP server to be used with DebugServerConnection, to speed up the development process.
It holds the BLE connection to the Move Hub, so there is no need to re-establish it every time.
Usage: DebugServer(BLEConnection().connect()).start()
:type connection: BLEConnection
"""
def __init__(self, connection):
self._running = False
self.sock = socket.socket()
self.connection = connection
def start(self, port=9090):
self.sock.bind(('', port))
self.sock.listen(1)
self._running = True
while self._running:
log.info("Accepting MoveHub debug connections at %s", port)
conn, addr = self.sock.accept()
if not self._running:
raise KeyboardInterrupt("Shutdown")
self.connection.set_notify_handler(lambda x, y: self._notify(conn, x, y))
try:
self._handle_conn(conn)
except KeyboardInterrupt:
raise
except BaseException:
log.error("Problem handling incoming connection: %s", traceback.format_exc())
finally:
self.connection.set_notify_handler(self._notify_dummy)
conn.close()
def __del__(self):
self.sock.close()
def _notify_dummy(self, handle, data):
log.debug("Dropped notification from handle %s: %s", handle, binascii.hexlify(data))
self._check_shutdown(data)
def _notify(self, conn, handle, data):
payload = {"type": "notification", "handle": handle, "data": str2hex(data)}
log.debug("Send notification: %s", payload)
try:
conn.send(json.dumps(payload) + "\n")
except KeyboardInterrupt:
raise
except BaseException:
log.error("Problem sending notification: %s", traceback.format_exc())
self._check_shutdown(data)
def _check_shutdown(self, data):
if data[5] == MSG_DEVICE_SHUTDOWN:
log.warning("Device shutdown")
self._running = False
def _handle_conn(self, conn):
"""
:type conn: socket._socketobject
"""
buf = ""
while True:
data = conn.recv(1024)
log.debug("Recv: %s", data.strip())
if not data:
break
buf += data
if "\n" in buf:
line = buf[:buf.index("\n")]
buf = buf[buf.index("\n") + 1:]
if line:
log.debug("Cmd line: %s", line)
try:
self._handle_cmd(json.loads(line))
except KeyboardInterrupt:
raise
except BaseException:
log.error("Failed to handle cmd: %s", traceback.format_exc())
def _handle_cmd(self, cmd):
if cmd['type'] == 'write':
self.connection.write(cmd['handle'], unhexlify(cmd['data']))
else:
raise ValueError("Unhandled cmd: %s" % cmd)
class DebugServerConnection(Connection):
"""
Connection type to be used with DebugServer, replaces BLEConnection
"""
def __init__(self, port=9090):
super(DebugServerConnection, self).__init__()
self.notify_handler = None
self.buf = ""
self.sock = socket.socket()
self.sock.connect(('localhost', port))
self.incoming = []
self.reader = Thread(target=self._recv)
self.reader.setName("Debug connection reader")
self.reader.setDaemon(True)
self.reader.start()
def __del__(self):
self.sock.close()
def write(self, handle, data):
payload = {
"type": "write",
"handle": handle,
"data": str2hex(data)
}
self._send(payload)
def _send(self, payload):
log.debug("Sending to debug server: %s", payload)
self.sock.send(json.dumps(payload) + "\n")
def _recv(self):
while True:
data = self.sock.recv(1024)
log.debug("Recv from debug server: %s", data.strip())
if not data:
raise KeyboardInterrupt("Server has closed connection")
self.buf += data
while "\n" in self.buf:
line = self.buf[:self.buf.index("\n")]
self.buf = self.buf[self.buf.index("\n") + 1:]
if line:
item = json.loads(line)
if item['type'] == 'notification' and self.notify_handler:
try:
self.notify_handler(item['handle'], unhexlify(item['data']))
except BaseException:
log.error("Failed to notify handler: %s", traceback.format_exc())
elif item['type'] == 'response':
self.incoming.append(item)
else:
log.warning("Dropped inbound: %s", item)
def set_notify_handler(self, handler):
self.notify_handler = handler
def is_alive(self):
return self.reader.isAlive()
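# Both ends above speak a simple framing protocol: one JSON object per line
# over TCP, with binary payloads carried as hex strings. A minimal sketch of
# the receive-side splitting that _recv and _handle_conn both implement
# (helper name is illustrative):
def _split_json_lines(buf):
    """Split a receive buffer into complete JSON messages plus the remainder."""
    messages = []
    while "\n" in buf:
        line, buf = buf.split("\n", 1)
        if line:
            messages.append(json.loads(line))
    return messages, buf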
|
test_local_task_job.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import multiprocessing
import os
import time
import unittest
from mock import patch
from airflow import AirflowException, models, settings
from airflow.configuration import conf
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs import LocalTaskJob
from airflow.models import DAG, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from tests.test_utils.db import clear_db_runs
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
class TestLocalTaskJob(unittest.TestCase):
def setUp(self):
clear_db_runs()
patcher = patch('airflow.jobs.base_job.sleep')
self.addCleanup(patcher.stop)
self.mock_base_job_sleep = patcher.start()
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@patch('os.getpid')
def test_heartbeat_failed_fast(self, mock_getpid):
"""
Test that task heartbeat will sleep when it fails fast
"""
mock_getpid.return_value = 1
self.mock_base_job_sleep.side_effect = time.sleep
with create_session() as session:
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag_id = 'test_heartbeat_failed_fast'
task_id = 'test_heartbeat_failed_fast_op'
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
dag.create_dagrun(run_id="test_heartbeat_failed_fast_run",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
job.heartrate = 2
heartbeat_records = []
job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
job._execute()
self.assertGreater(len(heartbeat_records), 2)
for i in range(1, len(heartbeat_records)):
time1 = heartbeat_records[i - 1]
time2 = heartbeat_records[i]
# Assert that the difference is small enough to avoid failures like:
# AssertionError: 1.996401 not greater than or equal to 2
delta = (time2 - time1).total_seconds()
self.assertAlmostEqual(delta, job.heartrate, delta=0.006)
@unittest.skipIf('mysql' in conf.get('core', 'sql_alchemy_conn'),
"flaky when run on mysql")
@unittest.skipIf('postgresql' in conf.get('core', 'sql_alchemy_conn'),
'flaky when run on postgresql')
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run,
executor=SequentialExecutor())
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
def test_localtaskjob_maintain_heart_rate(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run,
executor=SequentialExecutor())
# This should make sure we only heartbeat once and exit on the second
# loop iteration in _execute().
return_codes = [None, 0]
def multi_return_code():
return return_codes.pop(0)
time_start = time.time()
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
mock_ret_code.side_effect = multi_return_code
job1.run()
self.assertEqual(mock_start.call_count, 1)
self.assertEqual(mock_ret_code.call_count, 2)
time_end = time.time()
self.assertEqual(self.mock_base_job_sleep.call_count, 1)
self.assertEqual(job1.state, State.SUCCESS)
# Since we have patched the sleep call, it should not be sleeping to
# keep up with the heart rate in any other, unpatched places.
#
# We already made sure the patched sleep call is only called once.
self.assertLess(time_end - time_start, job1.heartrate)
session.close()
def test_mark_failure_on_failure_callback(self):
"""
Test that ensures that mark_failure in the UI fails
the task, and executes on_failure_callback
"""
data = {'called': False}
def check_failure(context):
self.assertEqual(context['dag_run'].dag_id,
'test_mark_failure')
data['called'] = True
dag = DAG(dag_id='test_mark_failure',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
task = DummyOperator(
task_id='test_state_succeeded1',
dag=dag,
on_failure_callback=check_failure)
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
job1.task_runner = StandardTaskRunner(job1)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.FAILED
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
self.assertTrue(data['called'])
process.join(timeout=10)
self.assertFalse(process.is_alive())
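# Several tests above poll ti.state with an inline refresh/sleep loop. A
# hypothetical helper capturing that pattern (illustrative, not part of
# Airflow's API):
def _wait_for_ti_state(ti, target_state, attempts=50, interval=0.1):
    """Refresh a task instance from the DB until it reaches target_state."""
    for _ in range(attempts):
        ti.refresh_from_db()
        if ti.state == target_state:
            return True
        time.sleep(interval)
    return False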
|
mcedit.py | # !/usr/bin/env python2.7
# -*- coding: utf_8 -*-
# import resource_packs # not the right place, moving it a bit further
#-# Modified by D.C.-G. for translation purpose
#.# Marks the layout modifications. -- D.C.-G.
"""
mcedit.py
Startup, main menu, keyboard configuration, automatic updating.
"""
import splash
import OpenGL
import sys
import os
if "--debug-ogl" not in sys.argv:
OpenGL.ERROR_CHECKING = False
import logging
# Setup file and stderr logging.
logger = logging.getLogger()
# Set the log level up while importing OpenGL.GL to hide some obnoxious warnings about old array handlers
logger.setLevel(logging.WARN)
logger.setLevel(logging.DEBUG)
logfile = 'mcedit.log'
# if hasattr(sys, 'frozen'):
# if sys.platform == "win32":
# import esky
# app = esky.Esky(sys.executable)
# logfile = os.path.join(app.appdir, logfile)
#
if sys.platform == "darwin":
logfile = os.path.expanduser("~/Library/Logs/mcedit.log")
else:
logfile = os.path.join(os.getcwdu(), logfile)
fh = logging.FileHandler(logfile, mode="w")
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.WARN)
if "--log-info" in sys.argv:
ch.setLevel(logging.INFO)
if "--log-debug" in sys.argv:
ch.setLevel(logging.DEBUG)
class FileLineFormatter(logging.Formatter):
def format(self, record):
record.__dict__['fileline'] = "%(module)s.py:%(lineno)d" % record.__dict__
record.__dict__['nameline'] = "%(name)s.py:%(lineno)d" % record.__dict__
return super(FileLineFormatter, self).format(record)
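# FileLineFormatter works by injecting synthetic 'fileline' and 'nameline'
# fields into each LogRecord's __dict__ before delegating to the base
# Formatter, so the format string below can reference '%(nameline)30s' as if
# it were a built-in record attribute.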
fmt = FileLineFormatter(
'[%(levelname)8s][%(nameline)30s]:%(message)s'
)
fh.setFormatter(fmt)
ch.setFormatter(fmt)
logger.addHandler(fh)
logger.addHandler(ch)
import release
start_msg = 'Starting MCEdit-Unified v%s'%release.TAG
logger.info(start_msg)
print '[ ****** ] ~~~~~~~~~~ %s'%start_msg
#---------------------------------------------------------------------
# NEW FEATURES HANDLING
#
# The idea is to be able to implement and test/use new code without stripping off the current one.
# These features/new code will be in the released stuff, but unavailable until explicitly requested.
#
# The new features which are under development can be enabled using the 'new_features.def' file.
# This file is a plain text file with one feature to enable per line.
# The file is parsed and each feature is added to the builtins using the pattern 'mcenf_<feature>'.
# The value for these builtins is 'True'.
# Then, in the code, just check whether builtins has the key 'mcenf_<feature>' to decide whether to use the new version of the code:
#
# ```
# def foo_old():
# # Was 'foo', code here is the one used unless the new version is wanted.
# [...]
#
# def foo_new():
# # This is the new version of the former 'foo' (current 'foo_old').
# [...]
#
# if __builtins__.get('mcenf_foo', False):
# foo = foo_new
# else:
# foo = foo_old
#
# ```
#
if '--new-features' in sys.argv:
if not os.path.exists('new_features.def'):
logger.warn("New features requested, but file 'new_features.def' not found!")
else:
logger.warn("New features mode requested.")
lines = [a.strip() for a in open('new_features.def', 'r').readlines()]
for line in lines:
setattr(__builtins__, 'mcenf_%s'%line, True)
logger.warn("New features list loaded.")
from version_utils import PlayerCache
import directories
import keys
import albow
import locale
DEF_ENC = locale.getdefaultlocale()[1]
if DEF_ENC is None:
DEF_ENC = "UTF-8"
from albow.translate import _, getPlatInfo
from albow.openglwidgets import GLViewport
from albow.root import RootWidget
from config import config
albow.resource.resource_dir = directories.getDataDir()
import panels
import leveleditor
# Building translation template
if "-tt" in sys.argv:
sys.argv.remove('-tt')
# Overwrite the default marker to have one adapted to our specific needs.
albow.translate.buildTemplateMarker = """
### THE FOLLOWING LINES HAVE BEEN ADDED BY THE TEMPLATE UPDATE FUNCTION.
### Please consider analyzing them and removing the entries that come
### from strings containing string formatting.
###
### For example, if you have a line already defined with this text:
### My %{animal} has %d legs.
### you may find lines like these below:
### My parrot has 2 legs.
### My dog has 4 legs.
###
### You may also have unwanted partial strings, especially the ones
### used in hotkeys. Delete them too.
### Finally, remove this paragraph, or it will be displayed in the program...
"""
albow.translate.buildTemplate = True
albow.translate.loadTemplate()
# Save the language defined in config and set en_US as current one.
logging.warning('MCEdit is invoked to update the translation template.')
orglang = config.settings.langCode.get()
logging.warning('The actual language is %s.'%orglang)
logging.warning('Setting en_US as language for this session.')
config.settings.langCode.set('en_US')
import mceutils
import mcplatform
# The two next switches '--debug-wm' and '--no-wm' are used to debug/disable the internal window handler.
# They are exclusive. You can't debug if it is disabled.
if "--debug-wm" in sys.argv:
mcplatform.DEBUG_WM = True
if "--no-wm" in sys.argv:
mcplatform.DEBUG_WM = False
mcplatform.USE_WM = False
else:
mcplatform.setupWindowHandler()
DEBUG_WM = mcplatform.DEBUG_WM
USE_WM = mcplatform.USE_WM
#-# DEBUG
if mcplatform.hasXlibDisplay and DEBUG_WM:
print '*** Xlib version', str(mcplatform.Xlib.__version__).replace(' ', '').replace(',', '.')[1:-1], 'found in',
if os.path.expanduser('~/.local/lib/python2.7/site-packages') in mcplatform.Xlib.__file__:
print 'user\'s',
else:
print 'system\'s',
print 'libraries.'
#-#
from mcplatform import platform_open
import numpy
from pymclevel.minecraft_server import ServerJarStorage
import os
import os.path
import pygame
from pygame import display, rect
import pymclevel
# import release
import shutil
import sys
import traceback
import threading
from utilities.gl_display_context import GLDisplayContext
#&# Prototype fro blocks/items names
import mclangres
#&#
getPlatInfo(OpenGL=OpenGL, numpy=numpy, pygame=pygame)
ESCAPE = '\033'
class MCEdit(GLViewport):
def_enc = DEF_ENC
def __init__(self, displayContext, *args):
if DEBUG_WM:
print "############################ __INIT__ ###########################"
self.resizeAlert = config.settings.showWindowSizeWarning.get()
self.maximized = config.settings.windowMaximized.get()
self.saved_pos = config.settings.windowX.get(), config.settings.windowY.get()
if displayContext.win and DEBUG_WM:
print "* self.displayContext.win.state", displayContext.win.get_state()
print "* self.displayContext.win.position", displayContext.win.get_position()
self.dis = None
self.win = None
self.wParent = None
self.wGrandParent = None
self.linux = False
if sys.platform == 'linux2' and mcplatform.hasXlibDisplay:
self.linux = True
self.dis = dis = mcplatform.Xlib.display.Display()
self.win = win = dis.create_resource_object('window', display.get_wm_info()['window'])
curDesk = os.environ.get('XDG_CURRENT_DESKTOP')
if curDesk in ('GNOME', 'X-Cinnamon', 'Unity'):
self.geomReciever = self.maximizeHandler = wParent = win.query_tree().parent
self.geomSender = wGrandParent = wParent.query_tree().parent
elif curDesk == 'KDE':
self.maximizeHandler = win.query_tree().parent
wParent = win.query_tree().parent.query_tree().parent
wGrandParent = wParent.query_tree().parent.query_tree().parent
self.geomReciever = self.geomSender = win.query_tree().parent.query_tree().parent.query_tree().parent
else:
self.maximizeHandler = self.geomReciever = self.geomSender = wGrandParent = wParent = None
self.wParent = wParent
self.wGrandParent = wGrandParent
root = dis.screen().root
windowID = root.get_full_property(dis.intern_atom('_NET_ACTIVE_WINDOW'), mcplatform.Xlib.X.AnyPropertyType).value[0]
print "###\nwindowID", windowID
window = dis.create_resource_object('window', windowID)
print "###\nwindow.get_geometry()", window.get_geometry()
print "###\nself.win", self.win.get_geometry()
print "###\nself.wParent.get_geometry()", self.wParent.get_geometry()
print "###\nself.wGrandParent.get_geometry()", self.wGrandParent.get_geometry()
try:
print "###\nself.wGrandParent.query_tree().parent.get_geometry()", self.wGrandParent.query_tree().parent.get_geometry()
except:
pass
print "###\nself.maximizeHandler.get_geometry()", self.maximizeHandler.get_geometry()
print "###\nself.geomReciever.get_geometry()", self.geomReciever.get_geometry()
print "###\nself.geomSender.get_geometry()", self.geomSender.get_geometry()
print "###\nself.win", self.win
print "###\nself.wParent", self.wParent
print "###\nself.wGrandParent", self.wGrandParent
print "###\nself.maximizeHandler", self.maximizeHandler
print "###\nself.geomReciever", self.geomReciever
print "###\nself.geomSender", self.geomSender
ws = displayContext.getWindowSize()
r = rect.Rect(0, 0, ws[0], ws[1])
GLViewport.__init__(self, r)
if DEBUG_WM:
print "self.size", self.size, "ws", ws
if displayContext.win and self.maximized:
# Send a maximize event now
displayContext.win.set_state(mcplatform.MAXIMIZED)
# Flip pygame.display to avoid to see the splash un-centered.
pygame.display.flip()
self.displayContext = displayContext
self.bg_color = (0, 0, 0, 1)
self.anchor = 'tlbr'
if not config.config.has_section("Recent Worlds"):
config.config.add_section("Recent Worlds")
self.setRecentWorlds([""] * 5)
self.optionsPanel = panels.OptionsPanel(self)
if not albow.translate.buildTemplate:
self.optionsPanel.getLanguageChoices()
lng = config.settings.langCode.get()
if lng not in self.optionsPanel.sgnal:
lng = "en_US"
config.settings.langCode.set(lng)
albow.translate.setLang(lng)
# Set the window caption here again, since the initialization is done through several steps...
display.set_caption(('MCEdit ~ ' + release.get_version()%_("for")).encode('utf-8'), 'MCEdit')
self.optionsPanel.initComponents()
self.graphicsPanel = panels.GraphicsPanel(self)
#&# Prototype for blocks/items names
mclangres.buildResources(lang=albow.translate.getLang())
#&#
#.#
self.keyConfigPanel = keys.KeyConfigPanel(self)
#.#
self.droppedLevel = None
self.nbtCopyBuffer = None
self.reloadEditor()
"""
check command line for files dropped from explorer
"""
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
f = arg.decode(sys.getfilesystemencoding())
if os.path.isdir(os.path.join(pymclevel.minecraftSaveFileDir, f)):
f = os.path.join(pymclevel.minecraftSaveFileDir, f)
self.droppedLevel = f
break
if os.path.exists(f):
self.droppedLevel = f
break
self.fileOpener = albow.FileOpener(self)
self.add(self.fileOpener)
self.fileOpener.focus()
#-# Translation live update preparation
def set_update_ui(self, v):
GLViewport.set_update_ui(self, v)
if v:
#&# Prototype for blocks/items names
mclangres.buildResources(lang=albow.translate.getLang())
#&#
self.keyConfigPanel = keys.KeyConfigPanel(self)
self.graphicsPanel = panels.GraphicsPanel(self)
if self.fileOpener in self.subwidgets:
idx = self.subwidgets.index(self.fileOpener)
self.remove(self.fileOpener)
self.fileOpener = albow.FileOpener(self)
if idx is not None:
self.add(self.fileOpener)
self.fileOpener.focus()
#-#
editor = None
def reloadEditor(self):
reload(leveleditor)
level = None
pos = None
if self.editor:
level = self.editor.level
self.remove(self.editor)
c = self.editor.mainViewport
pos, yaw, pitch = c.position, c.yaw, c.pitch
self.editor = leveleditor.LevelEditor(self)
self.editor.anchor = 'tlbr'
if level:
self.add(self.editor)
self.editor.gotoLevel(level)
self.focus_switch = self.editor
if pos is not None:
c = self.editor.mainViewport
c.position, c.yaw, c.pitch = pos, yaw, pitch
def add_right(self, widget):
w, h = self.size
widget.centery = h // 2
widget.right = w
self.add(widget)
def showOptions(self):
self.optionsPanel.present()
def showGraphicOptions(self):
self.graphicsPanel.present()
def showKeyConfig(self):
self.keyConfigPanel.presentControls()
def loadRecentWorldNumber(self, i):
worlds = list(self.recentWorlds())
if i - 1 < len(worlds):
self.loadFile(worlds[i - 1])
numRecentWorlds = 5
@staticmethod
def removeLevelDat(filename):
if filename.endswith("level.dat"):
filename = os.path.dirname(filename)
return filename
def recentWorlds(self):
worlds = []
for i in range(self.numRecentWorlds):
if config.config.has_option("Recent Worlds", str(i)):
try:
filename = (config.config.get("Recent Worlds", str(i)).decode('utf-8'))
worlds.append(self.removeLevelDat(filename))
except Exception, e:
logging.error(repr(e))
return list((f for f in worlds if f and os.path.exists(f)))
def addRecentWorld(self, filename):
filename = self.removeLevelDat(filename)
rw = list(self.recentWorlds())
if filename in rw:
return
rw = [filename] + rw[:self.numRecentWorlds - 1]
self.setRecentWorlds(rw)
@staticmethod
def setRecentWorlds(worlds):
for i, filename in enumerate(worlds):
config.config.set("Recent Worlds", str(i), filename.encode('utf-8'))
def makeSideColumn1(self):
def showLicense():
platform_open(os.path.join(directories.getDataDir(), "LICENSE.txt"))
def refresh():
PlayerCache().force_refresh()
hotkeys = ([("",
"Controls",
self.showKeyConfig),
("",
"Graphics",
self.showGraphicOptions),
("",
"Options",
self.showOptions),
("",
"Homepage",
lambda: platform_open("http://www.mcedit-unified.net"),
"http://www.mcedit-unified.net"),
("",
"About MCEdit",
lambda: platform_open("http://www.mcedit-unified.net/about.html"),
"http://www.mcedit-unified.net/about.html"),
("",
"License",
showLicense,
os.path.join(directories.getDataDir(), "LICENSE.txt")),
("",
"Refresh Player Names",
refresh)
])
c = albow.HotkeyColumn(hotkeys)
return c
def makeSideColumn2(self):
def showCacheDir():
try:
os.mkdir(directories.getCacheDir())
except OSError:
pass
platform_open(directories.getCacheDir())
def showScreenshotsDir():
try:
os.mkdir(os.path.join(directories.getCacheDir(), "screenshots"))
except OSError:
pass
platform_open(os.path.join(directories.getCacheDir(), "screenshots"))
hotkeys = ([("",
"Config Files",
showCacheDir,
directories.getCacheDir()),
("",
"Screenshots",
showScreenshotsDir,
os.path.join(directories.getCacheDir(), "screenshots"))
])
c = albow.HotkeyColumn(hotkeys)
return c
def resized(self, dw, dh):
"""
Handle window resizing events.
"""
if DEBUG_WM:
print "############################ RESIZED ############################"
(w, h) = self.size
config_w, config_h = config.settings.windowWidth.get(), config.settings.windowHeight.get()
win = self.displayContext.win
if DEBUG_WM and win:
print "dw", dw, "dh", dh
print "self.size (w, h) 1", self.size, "win.get_size", win.get_size()
print "size 1", config_w, config_h
elif DEBUG_WM and not win:
print "win is None, unable to print debug messages"
if win:
x, y = win.get_position()
if DEBUG_WM:
print "position", x, y
print "config pos", (config.settings.windowX.get(), config.settings.windowY.get())
if w == 0 and h == 0:
# The window has been minimized, no need to draw anything.
self.editor.renderer.render = False
return
# Mac window handling works better now, but `win`
# doesn't exist. So to get this alert to show up
# I'm checking if the platform is darwin. This only
# works because the code block never actually references
# `win`, otherwise it WOULD CRASH!!!
# You cannot change further if statements like this
# because they reference `win`
if win or sys.platform == "darwin":
# Handling too small resolutions.
# Dialog texts.
# "MCEdit does not support window resolutions below 1000x700.\nYou may not be able to access all functions at this resolution."
# New buttons:
# "Don't warn me again": disable the window popup across sessions.
# Tooltip: "Disable this message. Definitively. Even the next time you start MCEdit."
# "OK": dismiss the window and let go, don't pop up again for the session
# Tooltip: "Continue and not see this message until you restart MCEdit"
# "Cancel": resizes the window to the minimum size
# Tooltip: "Resize the window to the minimum recommended resolution."
# If the config showWindowSizeWarning is true and self.resizeAlert is true, show the popup
if (w < 1000 or h < 680) and config.settings.showWindowSizeWarning.get():
_w = w
_h = h
if self.resizeAlert:
answer = "_OK"
# Force the size only for the dimension that needs it.
if w < 1000 and h < 680:
_w = 1000
_h = 680
elif w < 1000:
_w = 1000
elif h < 680:
_h = 680
if not albow.dialogs.ask_tied_to:
answer = albow.ask(
"MCEdit does not support window resolutions below 1000x700.\nYou may not be able to access all functions at this resolution.",
["Don't remind me again.", "OK", "Cancel"], default=1, cancel=1,
responses_tooltips = {"Don't remind me again.": "Disable this message. Definitively. Even the next time you start MCEdit.",
"OK": "Continue and not see this message until you restart MCEdit",
"Cancel": "Resize the window to the minimum recommended resolution."},
tie_widget_to=True)
else:
if not albow.dialogs.ask_tied_to._visible:
albow.dialogs.ask_tied_to._visible = True
answer = albow.dialogs.ask_tied_to.present()
if answer == "Don't remind me again.":
config.settings.showWindowSizeWarning = False
self.resizeAlert = False
elif answer == "OK":
w, h = self.size
self.resizeAlert = False
elif answer == "Cancel":
w, h = _w, _h
else:
if albow.dialogs.ask_tied_to:
albow.dialogs.ask_tied_to.dismiss("_OK")
del albow.dialogs.ask_tied_to
albow.dialogs.ask_tied_to = None
elif (w >= 1000 or h >= 680):
if albow.dialogs.ask_tied_tos:
for ask_tied_to in albow.dialogs.ask_tied_tos:
ask_tied_to._visible = False
ask_tied_to.dismiss("_OK")
ask_tied_to.set_parent(None)
del ask_tied_to
if not win:
if w < 1000:
config.settings.windowWidth.set(1000)
w = 1000
x = config.settings.windowX.get()
if h < 680:
config.settings.windowHeight.set(680)
h = 680
y = config.settings.windowY.get()
if not self.editor.renderer.render:
self.editor.renderer.render = True
save_geom = True
if win:
maximized = win.get_state() == mcplatform.MAXIMIZED
sz = map(max, win.get_size(), (w, h))
if DEBUG_WM:
print "sz", sz
print "maximized", maximized, "self.maximized", self.maximized
if maximized:
if DEBUG_WM:
print "maximize, saving maximized size"
config.settings.windowMaximizedWidth.set(sz[0])
config.settings.windowMaximizedHeight.set(sz[1])
config.save()
self.saved_pos = config.settings.windowX.get(), config.settings.windowY.get()
save_geom = False
self.resizing = 0
win.set_mode(sz, self.displayContext.displayMode())
else:
if DEBUG_WM:
print "size 2", config.settings.windowWidth.get(), config.settings.windowHeight.get()
print "config_w", config_w, "config_h", config_h
print "pos", config.settings.windowX.get(), config.settings.windowY.get()
if self.maximized != maximized:
if DEBUG_WM:
print "restoring window pos and size"
print "(config.settings.windowX.get(), config.settings.windowY.get())", (config.settings.windowX.get(), config.settings.windowY.get())
(w, h) = (config_w, config_h)
win.set_state(1, (w, h), self.saved_pos)
else:
if DEBUG_WM:
print "window resized"
print "setting size to", (w, h), "and pos to", (x,y)
win.set_mode((w, h), self.displayContext.displayMode())
win.set_position((x, y))
config.settings.windowMaximizedWidth.set(0)
config.settings.windowMaximizedHeight.set(0)
config.save()
self.maximized = maximized
if DEBUG_WM:
print "self.size (w, h) 2", self.size, (w, h)
surf = pygame.display.get_surface()
print "display surf rect", surf.get_rect()
if win:
if hasattr(win.base_handler, 'get_geometry'):
print "win.base_handler geometry", win.base_handler.get_geometry()
print "win.base_handler.parent geometry", win.base_handler.query_tree().parent.get_geometry()
print "win.base_handler.parent.parent geometry", win.base_handler.query_tree().parent.query_tree().parent.get_geometry()
if save_geom:
config.settings.windowWidth.set(w)
config.settings.windowHeight.set(h)
config.save()
# The alert window is disabled if win is not None
if not win and (dw > 20 or dh > 20):
if not hasattr(self, 'resizeAlert'):
self.resizeAlert = self.shouldResizeAlert
if self.resizeAlert:
albow.alert(
"Window size increased. You may have problems using the cursor until MCEdit is restarted.")
self.resizeAlert = False
if win:
win.sync()
GLViewport.resized(self, dw, dh)
shouldResizeAlert = config.settings.shouldResizeAlert.property()
def loadFile(self, filename, addToRecent=True):
if os.path.exists(filename):
try:
self.editor.loadFile(filename, addToRecent=addToRecent)
except Exception, e:
logging.error(u'Failed to load file {0}: {1!r}'.format(
filename, e))
return None
self.remove(self.fileOpener)
self.fileOpener = None
if self.editor.level:
self.editor.size = self.size
self.add(self.editor)
self.focus_switch = self.editor
def createNewWorld(self):
level = self.editor.createNewLevel()
if level:
self.remove(self.fileOpener)
self.editor.size = self.size
self.add(self.editor)
self.focus_switch = self.editor
albow.alert(
"World created. To expand this infinite world, explore the world in Minecraft or use the Chunk Control tool to add or delete chunks.")
def removeEditor(self):
self.remove(self.editor)
self.fileOpener = albow.FileOpener(self)
self.add(self.fileOpener)
self.focus_switch = self.fileOpener
def confirm_quit(self):
#-# saving language template
if hasattr(albow.translate, "saveTemplate"):
albow.translate.saveTemplate()
#-#
self.saveWindowPosition()
config.save()
if self.editor.unsavedEdits:
# if config.settings.savePositionOnClose.get():
# self.editor.waypointManager.saveLastPosition(self.editor.mainViewport, self.editor.level.getPlayerDimension())
# self.editor.waypointManager.save()
result = albow.ask(_("There are {0} unsaved changes.").format(self.editor.unsavedEdits),
responses=["Save and Quit", "Quit", "Cancel"])
if result == "Save and Quit":
self.saveAndQuit()
elif result == "Quit":
self.justQuit()
elif result == "Cancel":
return False
else:
raise SystemExit
def saveAndQuit(self):
self.editor.saveFile()
raise SystemExit
@staticmethod
def justQuit():
raise SystemExit
@classmethod
def fetch_version(cls):
with cls.version_lock:
cls.version_info = release.fetch_new_version_info()
def check_for_version(self):
new_version = release.check_for_new_version(self.version_info)
if new_version is not False:
answer = albow.ask(
_('Version {} is available').format(new_version["tag_name"]),
[
'Download',
'View',
'Ignore'
],
default=1,
cancel=2
)
if answer == "View":
platform_open(new_version["html_url"])
elif answer == "Download":
platform_open(new_version["asset"]["browser_download_url"])
albow.alert(_(' {} should now be downloading via your browser. You will still need to extract the downloaded file to use the updated version.').format(new_version["asset"]["name"]))
@classmethod
def main(cls):
PlayerCache().load()
displayContext = GLDisplayContext(splash.splash, caption=(('MCEdit ~ ' + release.get_version()%_("for")).encode('utf-8'), 'MCEdit'))
os.environ['SDL_VIDEO_CENTERED'] = '0'
rootwidget = RootWidget(displayContext.display)
mcedit = MCEdit(displayContext)
rootwidget.displayContext = displayContext
rootwidget.confirm_quit = mcedit.confirm_quit
rootwidget.mcedit = mcedit
rootwidget.add(mcedit)
rootwidget.focus_switch = mcedit
if 0 == len(pymclevel.alphaMaterials.yamlDatas):
albow.alert("Failed to load minecraft.yaml. Check the console window for details.")
if mcedit.droppedLevel:
mcedit.loadFile(mcedit.droppedLevel)
cls.version_lock = threading.Lock()
cls.version_info = None
cls.version_checked = False
fetch_version_thread = threading.Thread(target=cls.fetch_version)
fetch_version_thread.start()
# Disabled old update code
# if hasattr(sys, 'frozen'):
# # We're being run from a bundle, check for updates.
# import esky
#
# app = esky.Esky(
# sys.executable.decode(sys.getfilesystemencoding()),
# 'https://bitbucket.org/codewarrior0/mcedit/downloads'
# )
# try:
# update_version = app.find_update()
# except:
# # FIXME: Horrible, hacky kludge.
# update_version = None
# logging.exception('Error while checking for updates')
#
# if update_version:
# answer = albow.ask(
# 'Version "%s" is available, would you like to '
# 'download it?' % update_version,
# [
# 'Yes',
# 'No',
# ],
# default=0,
# cancel=1
# )
# if answer == 'Yes':
# def callback(args):
# status = args['status']
# status_texts = {
# 'searching': u"Finding updates...",
# 'found': u"Found version {new_version}",
# 'downloading': u"Downloading: {received} / {size}",
# 'ready': u"Downloaded {path}",
# 'installing': u"Installing {new_version}",
# 'cleaning up': u"Cleaning up...",
# 'done': u"Done."
# }
# text = status_texts.get(status, 'Unknown').format(**args)
#
# panel = Dialog()
# panel.idleevent = lambda event: panel.dismiss()
# label = albow.Label(text, width=600)
# panel.add(label)
# panel.size = (500, 250)
# panel.present()
#
# try:
# app.auto_update(callback)
# except (esky.EskyVersionError, EnvironmentError):
# albow.alert(_("Failed to install update %s") % update_version)
# else:
# albow.alert(_("Version %s installed. Restart MCEdit to begin using it.") % update_version)
# raise SystemExit()
if config.settings.closeMinecraftWarning.get():
answer = albow.ask(
"Warning: Only open a world in one program at a time. If you open a world at the same time in MCEdit and in Minecraft, you will lose your work and possibly damage your save file.\n\n If you are using Minecraft 1.3 or earlier, you need to close Minecraft completely before you use MCEdit.",
["Don't remind me again.", "OK"], default=1, cancel=1)
if answer == "Don't remind me again.":
config.settings.closeMinecraftWarning.set(False)
# Disabled Crash Reporting Option
# if not config.settings.reportCrashesAsked.get():
# answer = albow.ask(
# "When an error occurs, MCEdit can report the details of the error to its developers. "
# "The error report will include your operating system version, MCEdit version, "
# "OpenGL version, plus the make and model of your CPU and graphics processor. No personal "
# "information will be collected.\n\n"
# "Error reporting can be enabled or disabled in the Options dialog.\n\n"
# "Enable error reporting?",
# ["Yes", "No"],
# default=0)
# config.settings.reportCrashes.set(answer == "Yes")
# config.settings.reportCrashesAsked.set(True)
config.settings.reportCrashes.set(False)
config.settings.reportCrashesAsked.set(True)
config.save()
if "update" in config.version.version.get():
answer = albow.ask("There are new default controls. Do you want to replace your current controls with the new ones?", ["Yes", "No"])
if answer == "Yes":
for configKey, k in keys.KeyConfigPanel.presets["WASD"]:
config.keys[config.convert(configKey)].set(k)
config.version.version.set("1.1.2.0")
config.save()
if "-causeError" in sys.argv:
raise ValueError("Error requested via -causeError")
while True:
try:
rootwidget.run()
except (SystemExit, KeyboardInterrupt):
print "Shutting down..."
exc_txt = traceback.format_exc()
if mcedit.editor.level:
if config.settings.savePositionOnClose.get():
mcedit.editor.waypointManager.saveLastPosition(mcedit.editor.mainViewport, mcedit.editor.level.dimNo)
mcedit.editor.waypointManager.save()
# The following Windows specific code won't be executed if we're using the '--debug-wm' switch.
if not USE_WM and sys.platform == "win32" and config.settings.setWindowPlacement.get():
(flags, showCmd, ptMin, ptMax, rect) = mcplatform.win32gui.GetWindowPlacement(
display.get_wm_info()['window'])
X, Y, r, b = rect
#w = r-X
#h = b-Y
if (showCmd == mcplatform.win32con.SW_MINIMIZE or
showCmd == mcplatform.win32con.SW_SHOWMINIMIZED):
showCmd = mcplatform.win32con.SW_SHOWNORMAL
config.settings.windowX.set(X)
config.settings.windowY.set(Y)
config.settings.windowShowCmd.set(showCmd)
# Restore the previous language if we ran with '-tt' (update translation template).
if albow.translate.buildTemplate:
logging.warning('Restoring %s.'%orglang)
config.settings.langCode.set(orglang)
#
config.save()
mcedit.editor.renderer.discardAllChunks()
mcedit.editor.deleteAllCopiedSchematics()
if mcedit.editor.level:
mcedit.editor.level.close()
mcedit.editor.root.RemoveEditFiles()
if 'SystemExit' in traceback.format_exc() or 'KeyboardInterrupt' in traceback.format_exc():
raise
else:
if 'SystemExit' in exc_txt:
raise SystemExit
if 'KeyboardInterrupt' in exc_txt:
raise KeyboardInterrupt
except MemoryError:
traceback.print_exc()
mcedit.editor.handleMemoryError()
def saveWindowPosition(self):
"""Save the window position in the configuration handler."""
if DEBUG_WM:
print "############################ EXITING ############################"
win = self.displayContext.win
# The following Windows specific code will not be executed if we're using the '--debug-wm' switch.
if not USE_WM and sys.platform == "win32" and config.settings.setWindowPlacement.get():
(flags, showCmd, ptMin, ptMax, rect) = mcplatform.win32gui.GetWindowPlacement(
display.get_wm_info()['window'])
X, Y, r, b = rect
#w = r-X
#h = b-Y
if (showCmd == mcplatform.win32con.SW_MINIMIZE or
showCmd == mcplatform.win32con.SW_SHOWMINIMIZED):
showCmd = mcplatform.win32con.SW_SHOWNORMAL
config.settings.windowX.set(X)
config.settings.windowY.set(Y)
config.settings.windowShowCmd.set(showCmd)
elif win:
config.settings.windowMaximized.set(self.maximized)
if not self.maximized:
x, y = win.get_position()
else:
x, y = self.saved_pos
if DEBUG_WM:
print "x", x, "y", y
config.settings.windowX.set(x)
config.settings.windowY.set(y)
def restart(self):
self.saveWindowPosition()
config.save()
self.editor.renderer.discardAllChunks()
self.editor.deleteAllCopiedSchematics()
if self.editor.level:
self.editor.level.close()
self.editor.root.RemoveEditFiles()
python = sys.executable
if sys.argv[0].endswith('.exe') or hasattr(sys, 'frozen'):
os.execl(python, python, * sys.argv[1:])
else:
os.execl(python, python, * sys.argv)
def main(argv):
"""
Set up the display and bundled schematics, and handle unclean
shutdowns.
"""
# This should eventually be revived, what is "squash_python"?
# try:
# import squash_python
#
# squash_python.uploader.SquashUploader.headers.pop("Content-encoding", None)
# squash_python.uploader.SquashUploader.headers.pop("Accept-encoding", None)
#
# version = release.get_version()
# client = squash_python.get_client()
# client.APIKey = "6ea52b17-ac76-4fd8-8db4-2d7303473ca2"
# client.environment = "unknown"
# client.host = "http://pixelhost.ezekielelin.com"
# client.notifyPath = "/mcedit_bugs.php"
# client.build = version
# client.timeout = 5
#
# Disabled Crash Reporting Option
# client.disabled = not config.settings.reportCrashesNew.get()
# client.disabled = True
#
# def _reportingChanged(val):
# client.disabled = not val
#
# config.settings.reportCrashes.addObserver(client, '_enabled', _reportingChanged)
# client.reportErrors()
# client.hook()
# except (ImportError, UnicodeError) as e:
# pass
try:
display.init()
except pygame.error:
os.environ['SDL_VIDEODRIVER'] = 'directx'
try:
display.init()
except pygame.error:
os.environ['SDL_VIDEODRIVER'] = 'windib'
display.init()
pygame.font.init()
try:
if not os.path.exists(directories.schematicsDir):
shutil.copytree(
os.path.join(directories.getDataDir(), u'stock-schematics'),
directories.schematicsDir
)
except Exception, e:
logging.warning('Error copying bundled schematics: {0!r}'.format(e))
try:
os.mkdir(directories.schematicsDir)
except Exception, e:
logging.warning('Error creating schematics folder: {0!r}'.format(e))
try:
ServerJarStorage()
except Exception, e:
logging.warning('Error creating server jar storage folder: {0!r}'.format(e))
try:
MCEdit.main()
except Exception as e:
print "mcedit.main MCEdit exited with errors."
logging.error("MCEdit version %s", release.get_version())
display.quit()
if hasattr(sys, 'frozen') and sys.platform == 'win32':
logging.exception("%s", e)
print "Press RETURN or close this window to dismiss."
raw_input()
raise
return 0
def getSelectedMinecraftVersion():
profile = directories.getMinecraftProfileJSON()[directories.getSelectedProfile()]
if 'lastVersionId' in profile:
return profile['lastVersionId']
else:
return '1.8'
def getLatestMinecraftVersion(snapshots=False):
import urllib2
import json
versioninfo = json.loads(urllib2.urlopen("http://s3.amazonaws.com/Minecraft.Download/versions/versions.json").read())
if snapshots:
return versioninfo['latest']['snapshot']
else:
return versioninfo['latest']['release']
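# Hedged sketch of the 'latest' block in the downloaded versions.json that the
# lookups above rely on (the concrete version strings are hypothetical):
# {"latest": {"snapshot": "15w47c", "release": "1.8.8"}, "versions": [...]}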
def weird_fix():
try:
from OpenGL.platform import win32
except Exception:
pass
class FakeStdOutErr:
"""Fake file object to redirect very last Python output.
Used to track 'errors' not handled in MCEdit.
Mimics 'write' and 'close' file objects methods.
Used on Linux only."""
mode = 'a'
def __init__(self, *args, **kwargs):
"""*args and **kwargs are ignored.
Deletes the 'logger' object and reopens 'logfile' in append mode."""
global logger
global logfile
del logger
self.fd = open(logfile, 'a')
def write(self, msg):
self.fd.write(msg)
def close(self, *args, **kwargs):
self.fd.flush()
self.fd.close()
if __name__ == "__main__":
try:
main(sys.argv)
except (SystemExit, KeyboardInterrupt):
# It happens that on Linux, Python tries to kill already dead processes and displays errors in the console.
# Redirecting them to the log file preserves them and any other errors which may occur.
if sys.platform == "linux2":
logger.debug("MCEdit is exiting normally.")
logger.debug("Lines below this one are pure Python output.")
sys.stdout = sys.stderr = FakeStdOutErr()
pass
except:
traceback.print_exc()
print ""
print "=================================="
print "\t\t\t MCEdit has crashed"
print "=================================="
raw_input("Press the Enter key to close this window")
pass
#sys.exit(main(sys.argv))
|
__init__.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
pywebview is a lightweight cross-platform wrapper around a webview component that allows displaying HTML content in its
own dedicated window. Works on Windows, OS X and Linux, and is compatible with Python 2 and 3.
(C) 2014-2019 Roman Sirokov and contributors
Licensed under BSD license
http://github.com/r0x0r/pywebview/
"""
import logging
import sys
import os
import re
import threading
from uuid import uuid4
from proxy_tools import module_property
from webview.event import Event
from webview.guilib import initialize
from webview.util import _token, base_uri, parse_file_type, escape_string, make_unicode, escape_line_breaks, WebViewException
from webview.window import Window
from .localization import original_localization
from .wsgi import Routing, StaticFiles, StaticResources
__all__ = (
# Stuff that's here
'start', 'create_window', 'token', 'screens',
# From wsgi
'Routing', 'StaticFiles', 'StaticResources',
# From event
'Event',
# from util
'_token', 'base_uri', 'parse_file_type', 'escape_string', 'make_unicode',
'escape_line_breaks', 'WebViewException',
# from window
'Window',
)
logger = logging.getLogger('pywebview')
handler = logging.StreamHandler()
formatter = logging.Formatter('[pywebview] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
log_level = logging.DEBUG if os.environ.get('PYWEBVIEW_LOG') == 'debug' else logging.INFO
logger.setLevel(log_level)
OPEN_DIALOG = 10
FOLDER_DIALOG = 20
SAVE_DIALOG = 30
DRAG_REGION_SELECTOR = '.pywebview-drag-region'
guilib = None
_debug = {
'mode': False
}
_user_agent = None
_multiprocessing = False
_http_server = False
token = _token
windows = []
def start(func=None, args=None, localization={}, gui=None, debug=False, http_server=False, user_agent=None):
"""
Start a GUI loop and display previously created windows. This function must
be called from a main thread.
:param func: Function to invoke upon starting the GUI loop.
:param args: Function arguments. Can be either a single value or a tuple of
values.
:param localization: A dictionary with localized strings. Default strings
and their keys are defined in localization.py.
:param gui: Force a specific GUI. Allowed values are ``cef``, ``qt``, or
``gtk`` depending on a platform.
:param debug: Enable debug mode. Default is False.
:param http_server: Enable built-in HTTP server. If enabled, local files
will be served using a local HTTP server on a random port. For each
window, a separate HTTP server is spawned. This option is ignored for
non-local URLs.
:param user_agent: Change user agent string. Not supported in EdgeHTML.
"""
global guilib, _debug, _multiprocessing, _http_server, _user_agent
def _create_children(other_windows):
if not windows[0].shown.wait(10):
raise WebViewException('Main window failed to load')
for window in other_windows:
guilib.create_window(window)
_debug['mode'] = debug
if debug:
logger.setLevel(logging.DEBUG)
_user_agent = user_agent
#_multiprocessing = multiprocessing
multiprocessing = False # TODO
_http_server = http_server
if multiprocessing:
from multiprocessing import Process as Thread
else:
from threading import Thread
original_localization.update(localization)
if threading.current_thread().name != 'MainThread':
raise WebViewException('This function must be run from a main thread.')
if len(windows) == 0:
raise WebViewException('You must create a window first before calling this function.')
guilib = initialize(gui)
for window in windows:
window._initialize(guilib, multiprocessing, http_server)
if len(windows) > 1:
t = Thread(target=_create_children, args=(windows[1:],))
t.start()
if func:
if args is not None:
if not hasattr(args, '__iter__'):
args = (args,)
t = Thread(target=func, args=args)
else:
t = Thread(target=func)
t.start()
guilib.create_window(windows[0])
def create_window(title, url=None, html=None, js_api=None, width=800, height=600, x=None, y=None,
resizable=True, fullscreen=False, min_size=(200, 100), hidden=False,
icon=None, frameless=False, easy_drag=True,
minimized=False, on_top=False, confirm_close=False, background_color='#FFFFFF',
transparent=False, text_select=False, localization=None):
"""
Create a web view window using a native GUI. The execution blocks after this function is invoked, so other
program logic must be executed in a separate thread.
:param title: Window title
:param url: URL to load
:param width: window width. Default is 800px
:param height: window height. Default is 600px
:param resizable: True if window can be resized, False otherwise. Default is True
:param fullscreen: True if start in fullscreen mode. Default is False
:param min_size: a (width, height) tuple that specifies a minimum window size. Default is 200x100
:param hidden: Whether the window should be hidden.
:param icon: Window icon name.
:param frameless: Whether the window should be frameless (no title bar or border).
:param easy_drag: Easy window drag mode when window is frameless.
:param minimized: Display window minimized
:param on_top: Keep window above other windows (required OS: Windows)
:param confirm_close: Display a window close confirmation dialog. Default is False
:param background_color: Background color as a hex string that is displayed before the content of webview is loaded. Default is white.
:param text_select: Allow text selection on page. Default is False.
:param transparent: Don't draw window background.
:return: window object.
"""
valid_color = r'^#(?:[0-9a-fA-F]{3}){1,2}$'
if not re.match(valid_color, background_color):
raise ValueError('{0} is not a valid hex triplet color'.format(background_color))
uid = 'master' if len(windows) == 0 else 'child_' + uuid4().hex[:8]
window = Window(uid, make_unicode(title), url, html,
width, height, x, y, resizable, fullscreen, min_size, hidden,
frameless, easy_drag, minimized, on_top, confirm_close, background_color,
js_api, text_select, transparent, localization, icon)
windows.append(window)
if threading.current_thread().name != 'MainThread' and guilib:
window._initialize(guilib, _multiprocessing, _http_server)
guilib.create_window(window)
return window
@module_property
def screens():
guilib = initialize()
screens = guilib.get_screens()
return screens
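# Minimal usage sketch (illustrative only, not part of the package); the URL and
# the callback are hypothetical. create_window() must be called before start(),
# and start() must run on the main thread, as documented above:
#
#     import webview
#
#     def on_started():
#         print('GUI loop is running')
#
#     window = webview.create_window('Example', url='https://example.com',
#                                    width=800, height=600)
#     webview.start(on_started)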
|
ssh.py | #!/usr/bin/env python
"""
DMLC submission script by ssh
One needs to make sure all slave machines are ssh-able.
"""
from __future__ import absolute_import
from multiprocessing import Pool, Process
import os, subprocess, logging
from threading import Thread
from . import tracker
def sync_dir(local_dir, slave_node, slave_dir):
"""
Sync the working directory from the root node to a slave node.
"""
remote = slave_node[0] + ':' + slave_dir
logging.info('rsync %s -> %s', local_dir, remote)
prog = 'rsync -az --rsh="ssh -o StrictHostKeyChecking=no -p %s" %s %s' % (
slave_node[1], local_dir, remote)
subprocess.check_call([prog], shell = True)
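# Example of the command built above for a hypothetical slave ('10.0.0.2', '22'),
# local_dir '/home/user/job/' and slave_dir '/tmp/job/' (illustrative only):
#   rsync -az --rsh="ssh -o StrictHostKeyChecking=no -p 22" /home/user/job/ 10.0.0.2:/tmp/job/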
def get_env(pass_envs):
envs = []
# get system envs
keys = ['OMP_NUM_THREADS', 'KMP_AFFINITY', 'LD_LIBRARY_PATH', 'AWS_ACCESS_KEY_ID',
'AWS_SECRET_ACCESS_KEY', 'DMLC_INTERFACE']
for k in keys:
v = os.getenv(k)
if v is not None:
envs.append('export ' + k + '=' + v + ';')
# get pass_envs
for k, v in pass_envs.items():
envs.append('export ' + str(k) + '=' + str(v) + ';')
return (' '.join(envs))
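# Illustrative result (values hypothetical): with OMP_NUM_THREADS=4 set in the
# environment and pass_envs={'DMLC_ROLE': 'worker'}, this returns
#   'export OMP_NUM_THREADS=4; export DMLC_ROLE=worker;'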
def submit(args):
assert args.host_file is not None
with open(args.host_file) as f:
tmp = f.readlines()
assert len(tmp) > 0
hosts=[]
for h in tmp:
if len(h.strip()) > 0:
# parse addresses of the form ip:port
h = h.strip()
i = h.find(":")
p = "22"
if i != -1:
p = h[i+1:]
h = h[:i]
# hosts now contain the pair ip, port
hosts.append((h, p))
def ssh_submit(nworker, nserver, pass_envs):
"""
customized submit script
"""
# thread func to run the job
def run(prog):
subprocess.check_call(prog, shell = True)
# sync programs if necessary
local_dir = os.getcwd()+'/'
working_dir = local_dir
if args.sync_dst_dir is not None and args.sync_dst_dir != 'None':
working_dir = args.sync_dst_dir
pool = Pool(processes=len(hosts))
for h in hosts:
pool.apply_async(sync_dir, args=(local_dir, h, working_dir))
pool.close()
pool.join()
# launch jobs
for i in range(nworker + nserver):
pass_envs['DMLC_ROLE'] = 'server' if i < nserver else 'worker'
(node, port) = hosts[i % len(hosts)]
pass_envs['DMLC_NODE_HOST'] = node
prog = get_env(pass_envs) + ' cd ' + working_dir + '; ' + (' '.join(args.command))
prog = 'ssh -o StrictHostKeyChecking=no ' + node + ' -p ' + port + ' \'' + prog + '\''
thread = Thread(target = run, args=(prog,))
thread.setDaemon(True)
thread.start()
return ssh_submit
tracker.submit(args.num_workers, args.num_servers,
fun_submit=ssh_submit,
pscmd=(' '.join(args.command)),
hostIP=args.host_ip)
|
__init__.py | #!/usr/bin/python3 -OO
# Copyright 2007-2020 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Imported to be referenced from other files directly
from sabnzbd.version import __version__, __baseline__
import os
import logging
import datetime
import tempfile
import pickle
import gzip
import subprocess
import time
import socket
import cherrypy
import sys
import re
import ssl
from threading import Lock, Thread
##############################################################################
# Determine platform flags
##############################################################################
WIN32 = DARWIN = FOUNDATION = WIN64 = DOCKER = False
KERNEL32 = None
if os.name == "nt":
WIN32 = True
from util.apireg import del_connection_info
try:
import ctypes
KERNEL32 = ctypes.windll.LoadLibrary("Kernel32.dll")
except:
pass
elif os.name == "posix":
ORG_UMASK = os.umask(18)
os.umask(ORG_UMASK)
# Check if running in a Docker container
try:
with open("/proc/1/cgroup", "rt") as ifh:
DOCKER = ":/docker/" in ifh.read()
except:
pass
import platform
if platform.system().lower() == "darwin":
DARWIN = True
# 12 = Sierra, 11 = ElCaptain, 10 = Yosemite, 9 = Mavericks, 8 = MountainLion
DARWIN_VERSION = int(platform.mac_ver()[0].split(".")[1])
try:
import Foundation
import sabnzbd.utils.sleepless as sleepless
FOUNDATION = True
except:
pass
# Now we can import safely
from sabnzbd.nzbqueue import NzbQueue
from sabnzbd.postproc import PostProcessor
from sabnzbd.downloader import Downloader
from sabnzbd.assembler import Assembler
from sabnzbd.rating import Rating
import sabnzbd.misc as misc
import sabnzbd.filesystem as filesystem
import sabnzbd.powersup as powersup
from sabnzbd.dirscanner import DirScanner, process_nzb_archive_file, process_single_nzb
from sabnzbd.urlgrabber import URLGrabber
import sabnzbd.scheduler as scheduler
import sabnzbd.rss as rss
import sabnzbd.emailer as emailer
from sabnzbd.articlecache import ArticleCache
import sabnzbd.newsunpack
import sabnzbd.encoding as encoding
import sabnzbd.config as config
from sabnzbd.bpsmeter import BPSMeter
import sabnzbd.cfg as cfg
import sabnzbd.database
import sabnzbd.lang as lang
import sabnzbd.par2file as par2file
import sabnzbd.api
import sabnzbd.interface
import sabnzbd.nzbstuff as nzbstuff
import sabnzbd.directunpacker as directunpacker
from sabnzbd.decorators import synchronized
from sabnzbd.constants import (
NORMAL_PRIORITY,
VALID_ARCHIVES,
REPAIR_REQUEST,
QUEUE_FILE_NAME,
QUEUE_VERSION,
QUEUE_FILE_TMPL,
)
import sabnzbd.getipaddress as getipaddress
LINUX_POWER = powersup.HAVE_DBUS
START = datetime.datetime.now()
MY_NAME = None
MY_FULLNAME = None
RESTART_ARGS = []
NEW_VERSION = (None, None)
DIR_HOME = None
DIR_APPDATA = None
DIR_LCLDATA = None
DIR_PROG = None
DIR_INTERFACES = None
DIR_LANGUAGE = None
DIR_PID = None
QUEUECOMPLETE = None # stores the nice name of the action
QUEUECOMPLETEACTION = None # stores the name of the function to be called
QUEUECOMPLETEARG = None # stores extra arguments that need to be passed
DAEMON = None
LOGFILE = None
WEBLOGFILE = None
LOGHANDLER = None
GUIHANDLER = None
LOG_ALL = False
AMBI_LOCALHOST = False
WIN_SERVICE = None # Instance of our Win32 Service Class
BROWSER_URL = None
CERTIFICATE_VALIDATION = True
NO_DOWNLOADING = False # When essentials are missing (SABYenc/par2/unrar)
WEB_DIR = None
WEB_DIR_CONFIG = None
WIZARD_DIR = None
WEB_COLOR = None
SABSTOP = False
RESTART_REQ = False
PAUSED_ALL = False
TRIGGER_RESTART = False # To trigger restart for Scheduler, WinService and Mac
WINTRAY = None # Thread for the Windows SysTray icon
WEBUI_READY = False
LAST_WARNING = None
LAST_ERROR = None
EXTERNAL_IPV6 = False
LAST_HISTORY_UPDATE = 1
# Performance measure for dashboard
PYSTONE_SCORE = 0
DOWNLOAD_DIR_SPEED = 0
COMPLETE_DIR_SPEED = 0
INTERNET_BANDWIDTH = 0
# Rendering of original command line arguments in Config
CMDLINE = " ".join(["\"%s\"" % arg for arg in sys.argv])
__INITIALIZED__ = False
__SHUTTING_DOWN__ = False
##############################################################################
# Signal Handler
##############################################################################
def sig_handler(signum=None, frame=None):
global SABSTOP, WINTRAY
if sabnzbd.WIN32 and signum is not None and DAEMON and signum == 5:
# Ignore the "logoff" event when running as a Win32 daemon
return True
if signum is not None:
logging.warning(T("Signal %s caught, saving and exiting..."), signum)
try:
save_state()
sabnzbd.zconfig.remove_server()
finally:
if sabnzbd.WIN32:
from util.apireg import del_connection_info
del_connection_info()
if sabnzbd.WINTRAY:
sabnzbd.WINTRAY.terminate = True
time.sleep(0.5)
else:
pid_file()
SABSTOP = True
os._exit(0)
##############################################################################
# Initializing
##############################################################################
INIT_LOCK = Lock()
def get_db_connection(thread_index=0):
# Create a connection and store it in the current thread
if not (hasattr(cherrypy.thread_data, "history_db") and cherrypy.thread_data.history_db):
cherrypy.thread_data.history_db = sabnzbd.database.HistoryDB()
return cherrypy.thread_data.history_db
@synchronized(INIT_LOCK)
def initialize(pause_downloader=False, clean_up=False, evalSched=False, repair=0):
global __INITIALIZED__, __SHUTTING_DOWN__, LOGFILE, WEBLOGFILE, LOGHANDLER, GUIHANDLER, AMBI_LOCALHOST, WAITEXIT, DAEMON, MY_NAME, MY_FULLNAME, NEW_VERSION, DIR_HOME, DIR_APPDATA, DIR_LCLDATA, DIR_PROG, DIR_INTERFACES, DARWIN, RESTART_REQ
if __INITIALIZED__:
return False
__SHUTTING_DOWN__ = False
# Set global database connection for Web-UI threads
cherrypy.engine.subscribe("start_thread", get_db_connection)
# Paused?
pause_downloader = pause_downloader or cfg.start_paused()
# Clean-up, if requested
if clean_up:
# New admin folder
filesystem.remove_all(cfg.admin_dir.get_path(), "*.sab")
# Optionally wait for "incomplete" to become online
if cfg.wait_for_dfolder():
wait_for_download_folder()
else:
cfg.download_dir.set(cfg.download_dir(), create=True)
cfg.download_dir.set_create(True)
# Set access rights for "incomplete" base folder
filesystem.set_permissions(cfg.download_dir.get_path(), recursive=False)
# If dirscan_dir cannot be created, set a proper value anyway.
# Maybe it's a network path that's temporarily missing.
path = cfg.dirscan_dir.get_path()
if not os.path.exists(path):
filesystem.create_real_path(cfg.dirscan_dir.ident(), "", path, False)
# Set call backs for Config items
cfg.cache_limit.callback(new_limit)
cfg.cherryhost.callback(guard_restart)
cfg.cherryport.callback(guard_restart)
cfg.web_dir.callback(guard_restart)
cfg.web_color.callback(guard_restart)
cfg.username.callback(guard_restart)
cfg.password.callback(guard_restart)
cfg.log_dir.callback(guard_restart)
cfg.https_port.callback(guard_restart)
cfg.https_cert.callback(guard_restart)
cfg.https_key.callback(guard_restart)
cfg.enable_https.callback(guard_restart)
cfg.top_only.callback(guard_top_only)
cfg.pause_on_post_processing.callback(guard_pause_on_pp)
cfg.quota_size.callback(guard_quota_size)
cfg.quota_day.callback(guard_quota_dp)
cfg.quota_period.callback(guard_quota_dp)
cfg.language.callback(guard_language)
cfg.enable_https_verification.callback(guard_https_ver)
guard_https_ver()
# Set cache limit
if not cfg.cache_limit() or (cfg.cache_limit() in ("200M", "450M") and (sabnzbd.WIN32 or sabnzbd.DARWIN)):
cfg.cache_limit.set(misc.get_cache_limit())
ArticleCache.do.new_limit(cfg.cache_limit.get_int())
check_incomplete_vs_complete()
# Set language files
lang.set_locale_info("SABnzbd", DIR_LANGUAGE)
lang.set_language(cfg.language())
sabnzbd.api.clear_trans_cache()
sabnzbd.change_queue_complete_action(cfg.queue_complete(), new=False)
# One time conversion "speedlimit" in schedules.
if not cfg.sched_converted():
schedules = cfg.schedules()
newsched = []
for sched in schedules:
if "speedlimit" in sched:
newsched.append(re.sub(r"(speedlimit \d+)$", r"\1K", sched))
else:
newsched.append(sched)
cfg.schedules.set(newsched)
cfg.sched_converted.set(1)
# Second time schedule conversion
if cfg.sched_converted() != 2:
cfg.schedules.set(["%s %s" % (1, schedule) for schedule in cfg.schedules()])
cfg.sched_converted.set(2)
config.save_config()
# Add hostname to the whitelist
if not cfg.host_whitelist():
cfg.host_whitelist.set(socket.gethostname())
# Do repair if requested
if check_repair_request():
repair = 2
pause_downloader = True
# Initialize threads
rss.init()
paused = BPSMeter.do.read()
NzbQueue()
Downloader(pause_downloader or paused)
Assembler()
PostProcessor()
NzbQueue.do.read_queue(repair)
DirScanner()
Rating()
URLGrabber()
scheduler.init()
if evalSched:
scheduler.analyse(pause_downloader)
logging.info("All processes started")
RESTART_REQ = False
__INITIALIZED__ = True
return True
@synchronized(INIT_LOCK)
def start():
global __INITIALIZED__
if __INITIALIZED__:
logging.debug("Starting postprocessor")
PostProcessor.do.start()
logging.debug("Starting assembler")
Assembler.do.start()
logging.debug("Starting downloader")
Downloader.do.start()
scheduler.start()
logging.debug("Starting dirscanner")
DirScanner.do.start()
Rating.do.start()
logging.debug("Starting urlgrabber")
URLGrabber.do.start()
@synchronized(INIT_LOCK)
def halt():
global __INITIALIZED__, __SHUTTING_DOWN__
if __INITIALIZED__:
logging.info("SABnzbd shutting down...")
__SHUTTING_DOWN__ = True
# Stop the windows tray icon
if sabnzbd.WINTRAY:
sabnzbd.WINTRAY.terminate = True
sabnzbd.zconfig.remove_server()
sabnzbd.directunpacker.abort_all()
rss.stop()
logging.debug("Stopping URLGrabber")
URLGrabber.do.stop()
try:
URLGrabber.do.join()
except:
pass
logging.debug("Stopping rating")
Rating.do.stop()
try:
Rating.do.join()
except:
pass
logging.debug("Stopping dirscanner")
DirScanner.do.stop()
try:
DirScanner.do.join()
except:
pass
# Stop Required Objects
logging.debug("Stopping downloader")
sabnzbd.downloader.stop()
logging.debug("Stopping assembler")
Assembler.do.stop()
try:
Assembler.do.join()
except:
pass
logging.debug("Stopping postprocessor")
PostProcessor.do.stop()
try:
PostProcessor.do.join()
except:
pass
# Save State
try:
save_state()
except:
logging.error(T("Fatal error at saving state"), exc_info=True)
# The Scheduler cannot be stopped when the stop was scheduled.
# Since all warm-restarts have been removed, it's no longer
# needed to stop the scheduler.
# We must tell the scheduler to deactivate.
scheduler.abort()
logging.info("All processes stopped")
__INITIALIZED__ = False
def trigger_restart(timeout=None):
""" Trigger a restart by setting a flag an shutting down CP """
# Sometimes we need to wait a bit to send good-bye to the browser
if timeout:
time.sleep(timeout)
# Add extra arguments
if sabnzbd.downloader.Downloader.do.paused:
sabnzbd.RESTART_ARGS.append("-p")
sys.argv = sabnzbd.RESTART_ARGS
# Stop all services
sabnzbd.halt()
cherrypy.engine.exit()
if sabnzbd.WIN32:
# Remove connection info for faster restart
del_connection_info()
# Leave the harder restarts to the polling in SABnzbd.py
if hasattr(sys, "frozen"):
sabnzbd.TRIGGER_RESTART = True
else:
# Do the restart right now
cherrypy.engine._do_execv()
##############################################################################
# Misc Wrappers
##############################################################################
def new_limit():
""" Callback for article cache changes """
ArticleCache.do.new_limit(cfg.cache_limit.get_int())
def guard_restart():
""" Callback for config options requiring a restart """
global RESTART_REQ
sabnzbd.RESTART_REQ = True
def guard_top_only():
""" Callback for change of top_only option """
NzbQueue.do.set_top_only(cfg.top_only())
def guard_pause_on_pp():
""" Callback for change of pause-download-on-pp """
if cfg.pause_on_post_processing():
pass # Not safe to idle downloader, because we don't know
# if post-processing is active now
else:
Downloader.do.resume_from_postproc()
def guard_quota_size():
""" Callback for change of quota_size """
BPSMeter.do.change_quota()
def guard_quota_dp():
""" Callback for change of quota_day or quota_period """
scheduler.restart(force=True)
def guard_language():
""" Callback for change of the interface language """
sabnzbd.lang.set_language(cfg.language())
sabnzbd.api.clear_trans_cache()
def set_https_verification(value):
""" Set HTTPS-verification state while returning current setting
False = disable verification
"""
prev = ssl._create_default_https_context == ssl.create_default_context
if value:
ssl._create_default_https_context = ssl.create_default_context
else:
ssl._create_default_https_context = ssl._create_unverified_context
return prev
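# Usage sketch (illustrative): temporarily disable certificate verification and
# restore the previous state afterwards.
#   previous = set_https_verification(False)
#   ...do work that must skip certificate checks...
#   set_https_verification(previous)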
def guard_https_ver():
""" Callback for change of https verification """
set_https_verification(cfg.enable_https_verification())
def add_url(url, pp=None, script=None, cat=None, priority=None, nzbname=None):
""" Add NZB based on a URL, attributes optional """
if "http" not in url:
return
if not pp or pp == "-1":
pp = None
if script and script.lower() == "default":
script = None
if cat and cat.lower() == "default":
cat = None
logging.info("Fetching %s", url)
# Add feed name if it came from RSS
msg = T("Trying to fetch NZB from %s") % url
if nzbname:
msg = "%s - %s" % (nzbname, msg)
# Generate the placeholder
future_nzo = NzbQueue.do.generate_future(msg, pp, script, cat, url=url, priority=priority, nzbname=nzbname)
URLGrabber.do.add(url, future_nzo)
return future_nzo.nzo_id
def save_state():
""" Save all internal bookkeeping to disk """
ArticleCache.do.flush_articles()
NzbQueue.do.save()
BPSMeter.do.save()
rss.save()
Rating.do.save()
DirScanner.do.save()
PostProcessor.do.save()
def pause_all():
""" Pause all activities than cause disk access """
global PAUSED_ALL
PAUSED_ALL = True
Downloader.do.pause()
logging.debug("PAUSED_ALL active")
def unpause_all():
""" Resume all activities """
global PAUSED_ALL
PAUSED_ALL = False
Downloader.do.resume()
logging.debug("PAUSED_ALL inactive")
##############################################################################
# NZB Saving Methods
##############################################################################
def backup_exists(filename):
""" Return True if backup exists and no_dupes is set """
path = cfg.nzb_backup_dir.get_path()
return path and os.path.exists(os.path.join(path, filename + ".gz"))
def backup_nzb(filename, data):
""" Backup NZB file """
path = cfg.nzb_backup_dir.get_path()
if path:
save_compressed(path, filename, data)
def save_compressed(folder, filename, data):
""" Save compressed NZB file in folder """
if filename.endswith(".nzb"):
filename += ".gz"
else:
filename += ".nzb.gz"
logging.info("Backing up %s", os.path.join(folder, filename))
try:
# Have to get around the path being put inside the tgz
with open(os.path.join(folder, filename), "wb") as tgz_file:
f = gzip.GzipFile(filename, fileobj=tgz_file)
f.write(encoding.utob(data))
f.flush()
f.close()
except:
logging.error(T("Saving %s failed"), os.path.join(folder, filename))
logging.info("Traceback: ", exc_info=True)
##############################################################################
# Unsynchronized methods
##############################################################################
def add_nzbfile(
nzbfile, pp=None, script=None, cat=None, priority=NORMAL_PRIORITY, nzbname=None, reuse=False, password=None
):
""" Add disk-based NZB file, optional attributes,
'reuse' flag will suppress duplicate detection
"""
if pp and pp == "-1":
pp = None
if script and script.lower() == "default":
script = None
if cat and cat.lower() == "default":
cat = None
if isinstance(nzbfile, str):
# File coming from queue repair
filename = nzbfile
keep = True
else:
# TODO: CherryPy mangles unicode-filenames!
# See https://github.com/cherrypy/cherrypy/issues/1766
filename = encoding.correct_unknown_encoding(nzbfile.filename)
keep = False
if not sabnzbd.WIN32:
# If a Windows client sends a file to a Unix server, backslashes may
# be included, so convert these
filename = filename.replace("\\", "/")
filename = os.path.basename(filename)
ext = os.path.splitext(filename)[1]
if ext.lower() in VALID_ARCHIVES:
suffix = ext.lower()
else:
suffix = ".nzb"
logging.info("Adding %s", filename)
if isinstance(nzbfile, str):
path = nzbfile
else:
try:
nzb_file, path = tempfile.mkstemp(suffix=suffix)
os.write(nzb_file, nzbfile.value)
os.close(nzb_file)
except OSError:
logging.error(T("Cannot create temp file for %s"), filename)
logging.info("Traceback: ", exc_info=True)
return None
if ext.lower() in VALID_ARCHIVES:
return process_nzb_archive_file(
filename, path, pp, script, cat, priority=priority, nzbname=nzbname, password=password
)
else:
return process_single_nzb(
filename,
path,
pp,
script,
cat,
priority=priority,
nzbname=nzbname,
keep=keep,
reuse=reuse,
password=password,
)
def enable_server(server):
""" Enable server (scheduler only) """
try:
config.get_config("servers", server).enable.set(1)
except:
logging.warning(T("Trying to set status of non-existing server %s"), server)
return
config.save_config()
Downloader.do.update_server(server, server)
def disable_server(server):
""" Disable server (scheduler only) """
try:
config.get_config("servers", server).enable.set(0)
except:
logging.warning(T("Trying to set status of non-existing server %s"), server)
return
config.save_config()
Downloader.do.update_server(server, server)
def system_shutdown():
""" Shutdown system after halting download and saving bookkeeping """
logging.info("Performing system shutdown")
Thread(target=halt).start()
while __INITIALIZED__:
time.sleep(1.0)
if sabnzbd.WIN32:
powersup.win_shutdown()
elif DARWIN:
powersup.osx_shutdown()
else:
powersup.linux_shutdown()
def system_hibernate():
""" Hibernate system """
logging.info("Performing system hybernation")
if sabnzbd.WIN32:
powersup.win_hibernate()
elif DARWIN:
powersup.osx_hibernate()
else:
powersup.linux_hibernate()
def system_standby():
""" Standby system """
logging.info("Performing system standby")
if sabnzbd.WIN32:
powersup.win_standby()
elif DARWIN:
powersup.osx_standby()
else:
powersup.linux_standby()
def shutdown_program():
""" Stop program after halting and saving """
logging.info("[%s] Performing SABnzbd shutdown", misc.caller_name())
sabnzbd.halt()
cherrypy.engine.exit()
sabnzbd.SABSTOP = True
def restart_program():
""" Restart program (used by scheduler) """
logging.info("Scheduled restart request")
# Just set the stop flag, because stopping CherryPy from
# the scheduler is not reliable
sabnzbd.TRIGGER_RESTART = True
def change_queue_complete_action(action, new=True):
""" Action or script to be performed once the queue has been completed
Scripts are prefixed with 'script_'
When "new" is False, check whether non-script actions are acceptable
"""
global QUEUECOMPLETE, QUEUECOMPLETEACTION, QUEUECOMPLETEARG
_action = None
_argument = None
if "script_" in action:
# all scripts are labeled script_xxx
_action = run_script
_argument = action.replace("script_", "")
elif new or cfg.queue_complete_pers.get():
if action == "shutdown_pc":
_action = system_shutdown
elif action == "hibernate_pc":
_action = system_hibernate
elif action == "standby_pc":
_action = system_standby
elif action == "shutdown_program":
_action = shutdown_program
else:
action = None
else:
action = None
if new:
cfg.queue_complete.set(action or "")
config.save_config()
# keep the name of the action for matching the current select in queue.tmpl
QUEUECOMPLETE = action
QUEUECOMPLETEACTION = _action
QUEUECOMPLETEARG = _argument
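# Illustrative mapping (the script name is hypothetical):
#   change_queue_complete_action("script_notify.py")  -> run_script with "notify.py"
#   change_queue_complete_action("shutdown_pc")       -> system_shutdown
#   change_queue_complete_action("")                  -> no action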
def run_script(script):
""" Run a user script (queue complete only) """
command = [os.path.join(cfg.script_dir.get_path(), script)]
if os.path.exists(command[0]):
try:
stup, need_shell, command, creationflags = sabnzbd.newsunpack.build_command(command)
logging.info("Spawning external command %s", command)
subprocess.Popen(
command,
shell=need_shell,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=stup,
creationflags=creationflags,
)
except:
logging.debug("Failed script %s, Traceback: ", script, exc_info=True)
def empty_queues():
""" Return True if queues empty or non-existent """
global __INITIALIZED__
return (not __INITIALIZED__) or (PostProcessor.do.empty() and NzbQueue.do.is_empty())
def keep_awake():
""" If we still have work to do, keep Windows/OSX system awake """
if KERNEL32 or FOUNDATION:
if sabnzbd.cfg.keep_awake():
ES_CONTINUOUS = 0x80000000
ES_SYSTEM_REQUIRED = 0x00000001
if (not Downloader.do.is_paused() and not NzbQueue.do.is_empty()) or (
not PostProcessor.do.paused and not PostProcessor.do.empty()
):
if KERNEL32:
# Set ES_SYSTEM_REQUIRED until the next call
KERNEL32.SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED)
else:
sleepless.keep_awake("SABnzbd is busy downloading and/or post-processing")
else:
if KERNEL32:
# Allow the regular state again
KERNEL32.SetThreadExecutionState(ES_CONTINUOUS)
else:
sleepless.allow_sleep()
################################################################################
# Data IO #
################################################################################
def get_new_id(prefix, folder, check_list=None):
""" Return unique prefixed admin identifier within folder
optionally making sure that id is not in the check_list.
"""
for n in range(10000):
try:
if not os.path.exists(folder):
os.makedirs(folder)
fd, path = tempfile.mkstemp("", "SABnzbd_%s_" % prefix, folder)
os.close(fd)
head, tail = os.path.split(path)
if not check_list or tail not in check_list:
return tail
except:
logging.error(T("Failure in tempfile.mkstemp"))
logging.info("Traceback: ", exc_info=True)
break
# Cannot create unique id, crash the process
raise IOError
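# Illustrative call (the folder path is hypothetical): get_new_id("nzo", "/path/to/admin")
# creates an empty file via tempfile.mkstemp and returns its basename, which looks
# like "SABnzbd_nzo_" followed by a random suffix.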
def save_data(data, _id, path, do_pickle=True, silent=False):
""" Save data to a diskfile """
if not silent:
logging.debug("[%s] Saving data for %s in %s", misc.caller_name(), _id, path)
path = os.path.join(path, _id)
# We try 3 times, to avoid any dict or access problems
for t in range(3):
try:
with open(path, "wb") as data_file:
if do_pickle:
pickle.dump(data, data_file, protocol=pickle.HIGHEST_PROTOCOL)
else:
data_file.write(data)
break
except:
if silent:
# This can happen, probably a removed folder
pass
elif t == 2:
logging.error(T("Saving %s failed"), path)
logging.info("Traceback: ", exc_info=True)
else:
# Wait a tiny bit before trying again
time.sleep(0.1)
def load_data(data_id, path, remove=True, do_pickle=True, silent=False):
""" Read data from disk file """
path = os.path.join(path, data_id)
if not os.path.exists(path):
logging.info("[%s] %s missing", misc.caller_name(), path)
return None
if not silent:
logging.debug("[%s] Loading data for %s from %s", misc.caller_name(), data_id, path)
try:
with open(path, "rb") as data_file:
if do_pickle:
try:
data = pickle.load(data_file, encoding=sabnzbd.encoding.CODEPAGE)
except UnicodeDecodeError:
# Could be Python 2 data that we can load using old encoding
data = pickle.load(data_file, encoding="latin1")
else:
data = data_file.read()
if remove:
filesystem.remove_file(path)
except:
logging.error(T("Loading %s failed"), path)
logging.info("Traceback: ", exc_info=True)
return None
return data
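# Round-trip sketch (identifier and folder are hypothetical):
#   save_data({"queue": []}, "my_state", "/path/to/admin")
#   state = load_data("my_state", "/path/to/admin", remove=False)
# With do_pickle=True (the default) the object is pickled on save and unpickled
# on load; remove=False keeps the file on disk after loading.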
def remove_data(_id, path):
""" Remove admin file """
path = os.path.join(path, _id)
try:
if os.path.exists(path):
filesystem.remove_file(path)
except:
logging.debug("Failed to remove %s", path)
def save_admin(data, data_id):
""" Save data in admin folder in specified format """
logging.debug("[%s] Saving data for %s", misc.caller_name(), data_id)
save_data(data, data_id, cfg.admin_dir.get_path())
def load_admin(data_id, remove=False, silent=False):
""" Read data in admin folder in specified format """
logging.debug("[%s] Loading data for %s from %s", misc.caller_name(), data_id)
return load_data(data_id, cfg.admin_dir.get_path(), remove=remove, silent=silent)
def request_repair():
""" Request a full repair on next restart """
path = os.path.join(cfg.admin_dir.get_path(), REPAIR_REQUEST)
try:
with open(path, "w") as f:
f.write("\n")
except:
pass
def check_repair_request():
""" Return True if repair request found, remove afterwards """
path = os.path.join(cfg.admin_dir.get_path(), REPAIR_REQUEST)
if os.path.exists(path):
try:
filesystem.remove_file(path)
except:
pass
return True
return False
def check_all_tasks():
""" Check every task and restart safe ones, else restart program
Return True when everything is under control
"""
if __SHUTTING_DOWN__ or not __INITIALIZED__:
return True
# Non-restartable threads, require program restart
if not sabnzbd.PostProcessor.do.is_alive():
logging.info("Restarting because of crashed postprocessor")
return False
if not Downloader.do.is_alive():
logging.info("Restarting because of crashed downloader")
return False
if not Assembler.do.is_alive():
logging.info("Restarting because of crashed assembler")
return False
# Kick the downloader, in case it missed the semaphore
Downloader.do.wakeup()
# Make sure the right servers are active
Downloader.do.check_timers()
# Restartable threads
if not DirScanner.do.is_alive():
logging.info("Restarting crashed dirscanner")
DirScanner.do.__init__()
if not URLGrabber.do.is_alive():
logging.info("Restarting crashed urlgrabber")
URLGrabber.do.__init__()
if not Rating.do.is_alive():
logging.info("Restarting crashed rating")
Rating.do.__init__()
if not sabnzbd.scheduler.sched_check():
logging.info("Restarting crashed scheduler")
sabnzbd.scheduler.init()
sabnzbd.downloader.Downloader.do.unblock_all()
# Check one-shot pause
sabnzbd.scheduler.pause_check()
# Check (and terminate) idle jobs
sabnzbd.nzbqueue.NzbQueue.do.stop_idle_jobs()
return True
def pid_file(pid_path=None, pid_file=None, port=0):
""" Create or remove pid file """
global DIR_PID
if not sabnzbd.WIN32:
if pid_path and pid_path.startswith("/"):
DIR_PID = os.path.join(pid_path, "sabnzbd-%d.pid" % port)
elif pid_file and pid_file.startswith("/"):
DIR_PID = pid_file
if DIR_PID:
try:
if port:
with open(DIR_PID, "w") as f:
f.write("%d\n" % os.getpid())
else:
filesystem.remove_file(DIR_PID)
except:
logging.warning("Cannot access PID file %s", DIR_PID)
def check_incomplete_vs_complete():
""" Make sure "incomplete" and "complete" are not identical """
complete = cfg.complete_dir.get_path()
if filesystem.same_file(cfg.download_dir.get_path(), complete):
if filesystem.real_path("X", cfg.download_dir()) == cfg.download_dir():
# Abs path, so set an abs path too
cfg.download_dir.set(os.path.join(complete, "incomplete"))
else:
cfg.download_dir.set("incomplete")
def wait_for_download_folder():
""" Wait for download folder to become available """
while not cfg.download_dir.test_path():
logging.debug('Waiting for "incomplete" folder')
time.sleep(2.0)
# Required wrapper because nzbstuff.py cannot import downloader.py
def highest_server(me):
return sabnzbd.downloader.Downloader.do.highest_server(me)
def test_ipv6():
""" Check if external IPv6 addresses are reachable """
if not cfg.selftest_host():
# User disabled the test, assume active IPv6
return True
try:
info = getipaddress.addresslookup6(cfg.selftest_host())
except:
logging.debug(
"Test IPv6: Disabling IPv6, because it looks like it's not available. Reason: %s", sys.exc_info()[0]
)
return False
try:
af, socktype, proto, canonname, sa = info[0]
sock = socket.socket(af, socktype, proto)
sock.settimeout(2) # 2 second timeout
sock.connect(sa[0:2])
sock.close()
logging.debug("Test IPv6: IPv6 test successful. Enabling IPv6")
return True
except socket.error:
logging.debug("Test IPv6: Cannot reach IPv6 test host. Disabling IPv6")
return False
except:
logging.debug("Test IPv6: Problem during IPv6 connect. Disabling IPv6. Reason: %s", sys.exc_info()[0])
return False
def test_cert_checking():
""" Test quality of certificate validation """
# User disabled the test, assume proper SSL certificates
if not cfg.selftest_host():
return True
# Try a connection to our test-host
try:
ctx = ssl.create_default_context()
base_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_sock = ctx.wrap_socket(base_sock, server_hostname=cfg.selftest_host())
ssl_sock.settimeout(2.0)
ssl_sock.connect((cfg.selftest_host(), 443))
ssl_sock.close()
return True
except (socket.gaierror, socket.timeout):
# Non-SSL related error.
# We now assume that certificates work instead of forcing
# lower quality just because of some (temporary) internet problem
logging.info("Could not determine system certificate validation quality due to connection problems")
return True
except:
# Seems something is still wrong
sabnzbd.set_https_verification(False)
return False
def history_updated():
""" To make sure we always have a fresh history """
sabnzbd.LAST_HISTORY_UPDATE += 1
# Never go over the limit
if sabnzbd.LAST_HISTORY_UPDATE + 1 >= sys.maxsize:
sabnzbd.LAST_HISTORY_UPDATE = 1
|
output.py | import cv2
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Circle
import os
import time
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from PIL import Image,ImageDraw
import time
#from skimage.io import imread_collection
import argparse
import glob
from processing import *
import json  # used by json.dump below; added in case it is not re-exported by processing
import multiprocessing as mp
from multiprocessing import Pool, Process
import time
start = time.perf_counter()
"""This function gets the figure only and the transformer results and draw
a bounding box on the figure only to segment the subfigures
"""
def resize_boundingbox(index):
print('Image is going to be processed: ', index)
parser = get_args()
args = parser.parse_args()
# original images
img_dir = args.file_path
img_only, rel_path = figure_only(index)
# img_path = os.path.join(img_dir, rel_path)
output_dir = args.outputDirectory
# Transformer prediction images
transformer_pred_dir = args.TransformerDirectory
transf_img_rel_paths = os.listdir(transformer_pred_dir)
transf_pred_path = os.path.join(transformer_pred_dir, rel_path)
all_coordinates = []
filename = os.path.join(output_dir, rel_path)
# Read the original and prediction images
try:
ROI_number = 0
# img_orig = cv2.imread(img_path)
img = img_only.copy()
pred_orig = cv2.imread(transf_pred_path)
preds = pred_orig.copy()
"""
get the height and width of the original image: this will be used for converting the resized image
back to the original dimension
"""
orx = img.shape[1]
ory = img.shape[0]
scalex = orx / 128
scaley = ory / 128
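# Worked example of the rescaling applied below (numbers are hypothetical): for
# an original image of 640x512, scalex = 640/128 = 5.0 and scaley = 512/128 = 4.0,
# so a box (x, y, w, h) = (10, 12, 30, 20) found on the 128x128 prediction maps
# back to (50, 48, 150, 80) on the original image.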
# Added code by me
gray = cv2.cvtColor(preds, cv2.COLOR_BGR2GRAY)
canny_get_edge = cv2.Canny(gray, 40, 250)
# Perform a little bit of morphology:
# Set kernel (structuring element) size:
kernelSize = (3, 3)
# Set operation iterations:
opIterations = 1
# Get the structuring element:
morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernelSize)
# Perform Dilate:
morphology = cv2.morphologyEx(canny_get_edge, cv2.MORPH_CLOSE, morphKernel, None, None, opIterations, cv2.BORDER_REFLECT101) # preds
contours, hierarchy = cv2.findContours(morphology, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
im = img_only.copy()
all_coordinates=[]
for c in contours:
rect = cv2.boundingRect(c)
if rect[2] < 5 or rect[3] < 5: continue
cv2.contourArea(c)
x, y, w, h = rect
x = int(x*scalex)
y = int(y*scaley)
w = int(w* scalex)
h = int(h * scaley)
# cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
ROI = im[y:y + h, x:x + w]
#cv2.imwrite('ROI_{}_{}.png'.format(i, ROI_number), ROI)
cv2.imwrite(filename[:-4] + "_{}.png".format(ROI_number), ROI)
ROI_number = ROI_number + 1
coordinates=(x, y, w, h)
all_coordinates.append(coordinates)
except Exception as error:
print(error)
# print(filename)
# return all_coordinates
print('**************************** Resized the bounding Box ******************************* ')
"""This function fine tune the amazon rekognition results of the labels"""
def finetune_label(label_name):
prefixes = ("FIG", "figure", "Figure", "fig", "FIGURE", "Fig")
prefixes2 = ("FIG.", "figure.", "Figure.", "fig.", "FIGURE.", "Fig.")
suffix = "."
new_label_name = {}
try:
if len(label_name) == 1:
for k, v in label_name.items():
new_label_name[k] = v
else:
keys = list(label_name.keys())
val = list(label_name.values())
left = 0
right = 1
while right < len(keys) and left < len(keys):
# First value starts with "FIG" and ends with a number
if (val[left].startswith(prefixes) or val[left].startswith(prefixes2)) and val[left].endswith(tuple("0123456789")):
new_label_name[keys[left]] = val[left]
left += 1
# Second starts with a "FIG" and ends with a number
elif (val[right].startswith(prefixes)or val[right].startswith(prefixes2)) and val[right].endswith(tuple("0123456789")):
new_label_name[keys[right]] = val[right]
right += 1
# first starts with a number and second starts with a "FIG" and ends with a number
elif val[left].startswith(tuple("0123456789")) and val[right].startswith(prefixes) and val[right].endswith(suffix):
name = val[right] + val[left]
new_label_name[keys[left]] = name
left += 1
right += 1
# first starts with a number and second starts with a "FIG"
elif val[left].startswith(tuple("0123456789")) and val[right].startswith(prefixes):
name = val[right] + " " + val[left]
new_label_name[keys[left]] = name
left += 1
right += 1
# first and second start with a number
elif val[left].startswith(tuple("0123456789")) and val[right].startswith(tuple("0123456789")):
right += 1
# first starts with a "FIG" and second starts with a number
elif val[left].startswith(prefixes) and val[right].startswith(tuple("0123456789")):
left += 1
right += 1
else:
right += 1
except Exception as error:
print(error)
return new_label_name
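# Illustrative behaviour (the keys are hypothetical detection ids): an OCR label
# split across two boxes such as {101: '3', 102: 'FIG.'} is merged into
# {101: 'FIG.3'}, while a single well-formed entry like {101: 'Fig. 2'} is
# returned unchanged.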
"""Function to generate the final json file with all the metadata"""
def patent_json(index):
json_name = {}
sub_list = []
# get patent id
parser = get_args()
args = parser.parse_args()
img_paths = args.file_path
try:
_, img_path = figure_only(index)
_, label_name = extract_label_bboxes(index)
# call the function to format the label_name
new_label_name = finetune_label(label_name)
# case for figure with subfigures
json_name['patent_id'] = os.path.splitext(img_path)[0] # [:19]
json_name["Figure_file"] = img_path
json_name["n_subfigures"] = len(new_label_name)
resize_boundingbox(index)
# get the figure number . e.g. Fig.2
for key, val in new_label_name.items():
sub_file = {}
num = ""
for c in val:
if c.isdigit():
num += c
sub_file["subfigure_id"] = int(num) if num.isdigit() else (num + ".")
sub_file["subfigure_file"] = (img_path[:-4] + "_" + num + '.png') if num.isdigit() else (img_path[:-4] + "_" + "." + '.png')
sub_file["subfigure_label"] = val
sub_list.append(sub_file)
json_name['subfigurefile'] = sub_list
except Exception as error:
print(error)
return json_name
def output_json():
"""Sequential variant: write one JSON record per figure to design2019.json."""
parser = get_args()
args = parser.parse_args()
json_output = args.jsonDirectory
amazon_paths = args.amazonDirectory
rel_paths = os.listdir(amazon_paths)
try:
with open(os.path.join(json_output, 'design2019.json'), 'w', encoding='utf-8') as fp:
for i in range(len(rel_paths)):
sample = patent_json(i)
json.dump(sample, fp, ensure_ascii=False)
fp.write("\n")
fp.close()
print("Done!")
except Exception as error:
print(error)
return f"Total segmented images: {len(rel_paths)}"
#results = output_json()
#print(results)
parser = get_args()
args = parser.parse_args()
json_output = args.jsonDirectory
amazon_paths = args.amazonDirectory
rel_paths = os.listdir(amazon_paths)
indices = list(range(len(rel_paths)))
if __name__ == "__main__":
fp = open(os.path.join(json_output, 'design2019.json'), 'w', encoding='utf-8',)
p = mp.cpu_count()
process = Pool(p)
sample = process.map(patent_json, indices)
json.dump(sample, fp, ensure_ascii=False,)
fp.write('\n')
process.close()
process.join()
#if __name__ == "__main__":
# processes = []
# num_cpu = mp.cpu_count()
# for _ in range(num_cpu):
# p = Process(target=output_json)
# p.start()
# processes.append(p)
# for process in processes:
# process.join()
#results = output_json()
#print(results)
finish = time.perf_counter()
print("Finished in {} seconds".format(finish-start))
|
merlin_flow_start_new.py | # (C) Copyright 2016-2021 Xilinx, Inc.
# All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from multiprocessing import Process
import os
import sys
import shutil
import time
import resource
space = " "
arg_list = sys.argv
frontground_execution = 1
arg_str = ""
for arg in arg_list[2:]:
if arg == "-regression" or arg == "-front" or arg == "-f":
frontground_execution = 1
elif arg == "-background" or arg == "-b":
frontground_execution = 0
elif arg == "-hls":
arg_str += " hls"
elif arg == "-impl":
arg_str += " bit"
elif arg == "impl":
arg_str += " bit"
elif arg == "-bit":
arg_str += " bit"
elif arg == "-test":
arg_str += " test"
elif arg == "-host":
arg_str += " host"
elif arg == "-sim":
arg_str += " sim"
else:
arg_str += " " + arg
if (arg_list[1] == "-v"):
os.system(
"mars_perl ${MARS_BUILD_DIR}/scripts/merlin_flow/merlin_flow_top.pl -v |& tee merlin.log ")
sys.exit()
################################
# The project argument can be a directory or a prj file
project_arg = arg_list[1]
flow_arg = "opt"
if (len(arg_list) >= 3):
flow_arg = arg_list[2]
if (flow_arg[0] == "-"):
flow_arg = "opt"
project_file = ""
if (os.path.isfile(project_arg)):
project_file = project_arg
elif (os.path.isdir(project_arg)):
project_abs_dir = os.path.abspath(project_arg)
project_name = os.path.basename(project_abs_dir)
project_file = os.path.join(project_arg, project_name + ".prj")
if (project_file == "" or (not os.path.isfile(project_file))):
print("[merlin_flow] ERROR: project file or directory does not exist: " + project_arg + " \n")
# print("[merlin_flow] project file : "+project_file+" \n");
sys.exit()
full_file = project_file
project_dir, project_file = os.path.split(full_file)
flow_log_file = "merlin_" + flow_arg + ".log"
os.system("rm -rf .merlin.finish")
def call_merlin_flow():
start_time = time.time()
if not os.path.exists(project_dir + "/report"):
os.system("mkdir " + project_dir + "/report")
if frontground_execution:
os.system("cd " + project_dir + "; mars_perl ${MARS_BUILD_DIR}/scripts/merlin_flow/merlin_flow_top.pl " +
project_file + " " + arg_str + " |& tee " + flow_log_file)
else:
os.system("cd " + project_dir + "; mars_perl ${MARS_BUILD_DIR}/scripts/merlin_flow/merlin_flow_top.pl " +
project_file + " " + arg_str + " |& tee report/merlin.log &")
# Hack: add time and memory usage info here, because Perl is not good at calling the getrusage API
ru = resource.getrusage(resource.RUSAGE_CHILDREN)
#*** Compilation finished
# end_msg = '''
#
#Total time: {0:.2f} seconds
#Peak memory usage: {1:.2f} Mbyte\n'''.format(time.time() - start_time, ru.ru_maxrss / 1024.0)
# print end_msg
# if not os.path.exists(project_dir + "/report"):
# os.system("mkdir " + project_dir + "/report")
# with open(os.path.join(project_dir, 'report', 'perf_msg.log'), 'w') as f:
# f.write(end_msg)
# if os.path.isfile(os.path.join(project_dir, flow_log_file)):
# with open(os.path.join(project_dir, flow_log_file), 'r') as src:
# with open(os.path.join(project_dir, "report", "merlin.log"), 'a') as dst:
# dst.write(src.read())
# dst.write(end_msg)
## os.system("rm -rf "+project_dir+"/*.log");
p1 = Process(target=call_merlin_flow)
p1.start()
#p2 = Process(target = func2)
# p2.start()
xml_file = project_dir + "/spec/directive.xml"
# print(xml_file)
import xml.etree.cElementTree as ET
tree = ET.parse(open(xml_file, "r"))
root = tree.getroot()
debug_flag = root.findall("ihatebug")
# print(debug_flag)
if debug_flag:
print("")
# print("Debug mode enabled\n")
else:
try:
while p1.is_alive():
time.sleep(1)
except KeyboardInterrupt:
time.sleep(0.5)
if os.path.exists(os.path.join(project_dir, 'implement', 'code_transform')):
shutil.rmtree(os.path.join(project_dir, 'implement', 'code_transform'))
if os.path.exists(os.path.join(project_dir, 'implement', 'opencl_gen')):
shutil.rmtree(os.path.join(project_dir, 'implement', 'opencl_gen'))
        print('\nPressed Ctrl+C! Merlin flow ended!\n')
sys.exit()
|
Misc.py | ## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import sys
import string
import thread
import threading
import time
import re
import cPickle
import array
import shutil
from struct import pack
from UserDict import IterableUserDict
from UserList import UserList
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from DataType import *
from BuildToolError import *
from CommonDataClass.DataClass import *
from Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
## Regular expression used to find placeholders in a string template
gPlaceholderPattern = re.compile(r"\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
## Dictionary used to store file time stamp for quick re-access
gFileTimeStampCache = {} # {file path : file time stamp}
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
def GetVariableOffset(mapfilepath, efifilepath, varnames):
""" Parse map file to get variable offset in current EFI file
    @param mapfilepath   Map file absolute path
    @param efifilepath   EFI binary file full path
    @param varnames      Iterable container whose elements are variable names to be searched
    @return              List whose elements are tuples of (variable name, raw offset)
"""
lines = []
try:
f = open(mapfilepath, 'r')
lines = f.readlines()
f.close()
except:
return None
if len(lines) == 0: return None
firstline = lines[0].strip()
if (firstline.startswith("Archive member included ") and
firstline.endswith(" file (symbol)")):
return _parseForGCC(lines, efifilepath, varnames)
return _parseGeneral(lines, efifilepath, varnames)
def _parseForGCC(lines, efifilepath, varnames):
""" Parse map file generated by GCC linker """
status = 0
sections = []
varoffset = []
for index, line in enumerate(lines):
line = line.strip()
        # state machine transition
if status == 0 and line == "Memory Configuration":
status = 1
continue
elif status == 1 and line == 'Linker script and memory map':
status = 2
continue
elif status ==2 and line == 'START GROUP':
status = 3
continue
# status handler
if status == 3:
m = re.match('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$', line)
if m != None:
sections.append(m.groups(0))
for varname in varnames:
Str = ''
m = re.match("^.data.(%s)" % varname, line)
if m != None:
m = re.match(".data.(%s)$" % varname, line)
if m != None:
Str = lines[index + 1]
else:
Str = line[len(".data.%s" % varname):]
if Str:
m = re.match('^([\da-fA-Fx]+) +([\da-fA-Fx]+)', Str.strip())
if m != None:
varoffset.append((varname, int(m.groups(0)[0], 16) , int(sections[-1][1], 16), sections[-1][0]))
if not varoffset:
return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs == None or len(efisecs) == 0:
return []
#redirection
redirection = 0
for efisec in efisecs:
for section in sections:
if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
redirection = int(section[1], 16) - efisec[1]
ret = []
for var in varoffset:
for efisec in efisecs:
if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
return ret
def _parseGeneral(lines, efifilepath, varnames):
status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
secs = [] # key = section name
varoffset = []
secRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
for line in lines:
line = line.strip()
if re.match("^Start[' ']+Length[' ']+Name[' ']+Class", line):
status = 1
continue
if re.match("^Address[' ']+Publics by Value[' ']+Rva\+Base", line):
status = 2
continue
if re.match("^entry point at", line):
status = 3
continue
if status == 1 and len(line) != 0:
m = secRe.match(line)
assert m != None, "Fail to parse the section in map file , line is %s" % line
sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
if status == 2 and len(line) != 0:
for varname in varnames:
m = symRe.match(line)
assert m != None, "Fail to parse the symbol in map file, line is %s" % line
sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
sec_no = int(sec_no, 16)
sym_offset = int(sym_offset, 16)
vir_addr = int(vir_addr, 16)
m2 = re.match('^[_]*(%s)' % varname, sym_name)
if m2 != None:
                    # found a binary PCD entry in the map file
for sec in secs:
if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
if not varoffset: return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs == None or len(efisecs) == 0:
return []
ret = []
for var in varoffset:
index = 0
for efisec in efisecs:
index = index + 1
if var[1].strip() == efisec[0].strip():
ret.append((var[0], hex(efisec[2] + var[2])))
elif var[4] == index:
ret.append((var[0], hex(efisec[2] + var[2])))
return ret
## Routine to process duplicated INF
#
# This function is called in the following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
Filename = os.path.split(Path.File)[1]
if '.' in Filename:
Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
else:
Filename = BaseName + Path.BaseName
#
# If -N is specified on command line, cache is disabled
# The directory has to be created
#
DbDir = os.path.split(GlobalData.gDatabasePath)[0]
if not os.path.exists(DbDir):
os.makedirs(DbDir)
#
    # A temporary INF is copied to the database path, which must have write permission.
    # The temporary file will be removed at the end of the build.
# In case of name conflict, the file name is
# FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
#
TempFullPath = os.path.join(DbDir,
Filename)
RtPath = PathClass(Path.File, Workspace)
#
# Modify the full path to temporary path, keep other unchanged
#
# To build same module more than once, the module path with FILE_GUID overridden has
# the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
# in DSC which is used as relative path by C files and other files in INF.
# A trick was used: all module paths are PathClass instances, after the initialization
# of PathClass, the PathClass.Path is overridden by the temporary INF path.
#
# The reason for creating a temporary INF is:
# Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
# the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
# A different key for the same module is needed to create different output directory,
# retrieve overridden PCDs, library instances.
#
# The BaseName is the FILE_GUID which is also the output directory name.
#
#
RtPath.Path = TempFullPath
RtPath.BaseName = BaseName
#
# If file exists, compare contents
#
if os.path.exists(TempFullPath):
with open(str(Path), 'rb') as f1: Src = f1.read()
with open(TempFullPath, 'rb') as f2: Dst = f2.read()
if Src == Dst:
return RtPath
GlobalData.gTempInfs.append(TempFullPath)
shutil.copy2(str(Path), TempFullPath)
return RtPath
## Remove temporarily created INFs whose paths were saved in gTempInfs
#
def ClearDuplicatedInf():
for File in GlobalData.gTempInfs:
if os.path.exists(File):
os.remove(File)
## callback routine for processing variable option
#
# This function can be used to process a variable number of option values. Its
# typical usage is to specify an architecture list on the command line.
# (e.g. <tool> -a IA32 X64 IPF)
#
# @param Option Standard callback function parameter
# @param OptionString Standard callback function parameter
# @param Value Standard callback function parameter
# @param Parser Standard callback function parameter
#
# @retval
#
def ProcessVariableArgument(Option, OptionString, Value, Parser):
assert Value is None
Value = []
RawArgs = Parser.rargs
while RawArgs:
Arg = RawArgs[0]
if (Arg[:2] == "--" and len(Arg) > 2) or \
(Arg[:1] == "-" and len(Arg) > 1 and Arg[1] != "-"):
break
Value.append(Arg)
del RawArgs[0]
setattr(Parser.values, Option.dest, Value)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
GuidList = Guid.split('-')
Result = '{'
for Index in range(0, 3, 1):
Result = Result + '0x' + GuidList[Index] + ', '
Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
for Index in range(0, 12, 2):
Result = Result + ', 0x' + GuidList[4][Index:Index + 2]
Result += '}}'
return Result
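# Illustrative usage sketch (not part of the build tools; the GUID below is a
# made-up example value):
#
#   GuidStringToGuidStructureString('af9ffd67-ec10-488a-9dfc-6cbf5ee22c2e')
#   # -> '{0xaf9ffd67, 0xec10, 0x488a, {0x9d, 0xfc, 0x6c, 0xbf, 0x5e, 0xe2, 0x2c, 0x2e}}'
#   GuidStructureStringToGuidString(
#       '{0xaf9ffd67, 0xec10, 0x488a, {0x9d, 0xfc, 0x6c, 0xbf, 0x5e, 0xe2, 0x2c, 0x2e}}')
#   # -> 'af9ffd67-ec10-488a-9dfc-6cbf5ee22c2e' (round trip; that function is defined below)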
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 16:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[3], 16),
int(guidValueList[2], 16),
int(guidValueList[1], 16),
int(guidValueList[0], 16),
int(guidValueList[5], 16),
int(guidValueList[4], 16),
int(guidValueList[7], 16),
int(guidValueList[6], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16),
int(guidValueList[11], 16),
int(guidValueList[12], 16),
int(guidValueList[13], 16),
int(guidValueList[14], 16),
int(guidValueList[15], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
if Directory == None or Directory.strip() == "":
return True
try:
if not os.access(Directory, os.F_OK):
os.makedirs(Directory)
except:
return False
return True
## Remove directories, including the files and sub-directories in them
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
if Directory == None or Directory.strip() == "" or not os.path.exists(Directory):
return
if Recursively:
CurrentDirectory = os.getcwd()
os.chdir(Directory)
for File in os.listdir("."):
if os.path.isdir(File):
RemoveDirectory(File, Recursively)
else:
os.remove(File)
os.chdir(CurrentDirectory)
os.rmdir(Directory)
## Check if given file is changed or not
#
# This method is used to check if a file has changed between two build
# actions. It makes use of a cache to store file timestamps.
#
# @param File The path of file
#
# @retval True If the given file has changed, doesn't exist, or can't be
# found in the timestamp cache
# @retval False If the given file has not changed
#
def IsChanged(File):
if not os.path.exists(File):
return True
FileState = os.stat(File)
TimeStamp = FileState[-2]
if File in gFileTimeStampCache and TimeStamp == gFileTimeStampCache[File]:
FileChanged = False
else:
FileChanged = True
gFileTimeStampCache[File] = TimeStamp
return FileChanged
## Store content in file
#
# This method is used to save a file only when its content has changed. This is
# quite useful for the "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
if not IsBinaryFile:
Content = Content.replace("\n", os.linesep)
if os.path.exists(File):
try:
if Content == open(File, "rb").read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
DirName = os.path.dirname(File)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
try:
if GlobalData.gIsWindows:
try:
from PyUtility import SaveFileToDisk
if not SaveFileToDisk(File, Content):
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData=File)
except:
Fd = open(File, "wb")
Fd.write(Content)
Fd.close()
else:
Fd = open(File, "wb")
Fd.write(Content)
Fd.close()
except IOError, X:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
return True
## Make a Python object persistent on file system
#
# @param Data The object to be stored in file
# @param File The path of file to store the object
#
def DataDump(Data, File):
Fd = None
try:
Fd = open(File, 'wb')
cPickle.dump(Data, Fd, cPickle.HIGHEST_PROTOCOL)
except:
EdkLogger.error("", FILE_OPEN_FAILURE, ExtraData=File, RaiseError=False)
finally:
if Fd != None:
Fd.close()
## Restore a Python object from a file
#
# @param File The path of file stored the object
#
# @retval object A python object
# @retval None If failure in file operation
#
def DataRestore(File):
Data = None
Fd = None
try:
Fd = open(File, 'rb')
Data = cPickle.load(Fd)
except Exception, e:
EdkLogger.verbose("Failed to load [%s]\n\t%s" % (File, str(e)))
Data = None
finally:
if Fd != None:
Fd.close()
return Data
## Retrieve and cache the real path name in file system
#
# @param Root The root directory that paths are relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
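# Illustrative usage sketch (paths are hypothetical): the cache resolves a
# possibly wrongly-cased path to the real-cased path found on disk.
#
#   Cache = DirCache('/ws')                  # index the workspace root
#   Cache['/ws/MDEPKG/MdePkg.dec']           # -> '/ws/MdePkg/MdePkg.dec' if that file exists
#   Cache['/ws/NoSuchPkg/NoSuch.dec']        # -> None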
class DirCache:
_CACHE_ = set()
_UPPER_CACHE_ = {}
def __init__(self, Root):
self._Root = Root
for F in os.listdir(Root):
self._CACHE_.add(F)
self._UPPER_CACHE_[F.upper()] = F
# =[] operator
def __getitem__(self, Path):
Path = Path[len(os.path.commonprefix([Path, self._Root])):]
if not Path:
return self._Root
if Path and Path[0] == os.path.sep:
Path = Path[1:]
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
UpperPath = Path.upper()
if UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
IndexList = []
LastSepIndex = -1
SepIndex = Path.find(os.path.sep)
while SepIndex > -1:
Parent = UpperPath[:SepIndex]
if Parent not in self._UPPER_CACHE_:
break
LastSepIndex = SepIndex
SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
if LastSepIndex == -1:
return None
Cwd = os.getcwd()
os.chdir(self._Root)
SepIndex = LastSepIndex
while SepIndex > -1:
Parent = Path[:SepIndex]
ParentKey = UpperPath[:SepIndex]
if ParentKey not in self._UPPER_CACHE_:
os.chdir(Cwd)
return None
if Parent in self._CACHE_:
ParentDir = Parent
else:
ParentDir = self._UPPER_CACHE_[ParentKey]
for F in os.listdir(ParentDir):
Dir = os.path.join(ParentDir, F)
self._CACHE_.add(Dir)
self._UPPER_CACHE_[Dir.upper()] = Dir
SepIndex = Path.find(os.path.sep, SepIndex + 1)
os.chdir(Cwd)
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
elif UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
return None
## Get all files of a directory
#
# @param Root       Root directory
# @param SkipList   Names of directories to be skipped during the walk
#
# @retval A list of all files
#
def GetFiles(Root, SkipList=None, FullPath=True):
OriPath = Root
FileList = []
for Root, Dirs, Files in os.walk(Root):
if SkipList:
for Item in SkipList:
if Item in Dirs:
Dirs.remove(Item)
for File in Files:
File = os.path.normpath(os.path.join(Root, File))
if not FullPath:
File = File[len(OriPath) + 1:]
FileList.append(File)
return FileList
## Check if the given file exists or not
#
# @param File File name or path to be checked
# @param Ext  The expected file extension, if any
#
# @retval True if the file exists (and matches the extension, when given)
# @retval False if the file doesn't exist
#
def ValidFile(File, Ext=None):
if Ext != None:
Dummy, FileExt = os.path.splitext(File)
if FileExt.lower() != Ext.lower():
return False
if not os.path.exists(File):
return False
return True
def RealPath(File, Dir='', OverrideDir=''):
NewFile = os.path.normpath(os.path.join(Dir, File))
NewFile = GlobalData.gAllFiles[NewFile]
if not NewFile and OverrideDir:
NewFile = os.path.normpath(os.path.join(OverrideDir, File))
NewFile = GlobalData.gAllFiles[NewFile]
return NewFile
def RealPath2(File, Dir='', OverrideDir=''):
NewFile = None
if OverrideDir:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile:
if OverrideDir[-1] == os.path.sep:
return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
else:
return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
if GlobalData.gAllFiles:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
if not NewFile:
NewFile = os.path.normpath(os.path.join(Dir, File))
if not os.path.exists(NewFile):
return None, None
if NewFile:
if Dir:
if Dir[-1] == os.path.sep:
return NewFile[len(Dir):], NewFile[0:len(Dir)]
else:
return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
else:
return NewFile, ''
return None, None
## Check if the given file exists or not
#
#
def ValidFile2(AllFiles, File, Ext=None, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
NewFile = File
if Ext != None:
Dummy, FileExt = os.path.splitext(File)
if FileExt.lower() != Ext.lower():
return False, File
# Replace the Edk macros
if OverrideDir != '' and OverrideDir != None:
if OverrideDir.find('$(EFI_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
if OverrideDir.find('$(EDK_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)
    # Replace the default dir with the current dir
if Dir == '.':
Dir = os.getcwd()
Dir = Dir[len(Workspace) + 1:]
# First check if File has Edk definition itself
if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
NewFile = File.replace('$(EFI_SOURCE)', EfiSource)
NewFile = NewFile.replace('$(EDK_SOURCE)', EdkSource)
NewFile = AllFiles[os.path.normpath(NewFile)]
if NewFile != None:
return True, NewFile
# Second check the path with override value
if OverrideDir != '' and OverrideDir != None:
NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile != None:
return True, NewFile
# Last check the path with normal definitions
File = os.path.join(Dir, File)
NewFile = AllFiles[os.path.normpath(File)]
if NewFile != None:
return True, NewFile
return False, File
## Check if the given file exists or not
#
#
def ValidFile3(AllFiles, File, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
# Replace the Edk macros
if OverrideDir != '' and OverrideDir != None:
if OverrideDir.find('$(EFI_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
if OverrideDir.find('$(EDK_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)
    # Replace the default dir with the current dir
    # Dir is the current module dir relative to the workspace
if Dir == '.':
Dir = os.getcwd()
Dir = Dir[len(Workspace) + 1:]
NewFile = File
RelaPath = AllFiles[os.path.normpath(Dir)]
NewRelaPath = RelaPath
while(True):
# First check if File has Edk definition itself
if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
File = File.replace('$(EFI_SOURCE)', EfiSource)
File = File.replace('$(EDK_SOURCE)', EdkSource)
NewFile = AllFiles[os.path.normpath(File)]
if NewFile != None:
NewRelaPath = os.path.dirname(NewFile)
File = os.path.basename(NewFile)
#NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
break
# Second check the path with override value
if OverrideDir != '' and OverrideDir != None:
NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile != None:
#NewRelaPath = os.path.dirname(NewFile)
NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
break
# Last check the path with normal definitions
NewFile = AllFiles[os.path.normpath(os.path.join(Dir, File))]
if NewFile != None:
break
# No file found
break
return NewRelaPath, RelaPath, File
def GetRelPath(Path1, Path2):
FileName = os.path.basename(Path2)
L1 = os.path.normpath(Path1).split(os.path.normpath('/'))
L2 = os.path.normpath(Path2).split(os.path.normpath('/'))
for Index in range(0, len(L1)):
if L1[Index] != L2[Index]:
FileName = '../' * (len(L1) - Index)
for Index2 in range(Index, len(L2)):
FileName = os.path.join(FileName, L2[Index2])
break
return os.path.normpath(FileName)
## Get GUID value from given packages
#
# @param CName        The CName of the GUID
# @param PackageList  List of packages to look up in
# @param Inffile      The driver file
#
# @retval GuidValue   if the CName is found in any given package
# @retval None        if the CName is not found in any of the given packages
#
def GuidValue(CName, PackageList, Inffile = None):
for P in PackageList:
GuidKeys = P.Guids.keys()
if Inffile and P._PrivateGuids:
if not Inffile.startswith(P.MetaFile.Dir):
GuidKeys = (dict.fromkeys(x for x in P.Guids if x not in P._PrivateGuids)).keys()
if CName in GuidKeys:
return P.Guids[CName]
return None
## Get Protocol value from given packages
#
# @param CName          The CName of the Protocol
# @param PackageList    List of packages to look up in
# @param Inffile        The driver file
#
# @retval ProtocolValue if the CName is found in any given package
# @retval None          if the CName is not found in any of the given packages
#
def ProtocolValue(CName, PackageList, Inffile = None):
for P in PackageList:
ProtocolKeys = P.Protocols.keys()
if Inffile and P._PrivateProtocols:
if not Inffile.startswith(P.MetaFile.Dir):
ProtocolKeys = (dict.fromkeys(x for x in P.Protocols if x not in P._PrivateProtocols)).keys()
if CName in ProtocolKeys:
return P.Protocols[CName]
return None
## Get PPI value from given packages
#
# @param CName        The CName of the PPI
# @param PackageList  List of packages to look up in
# @param Inffile      The driver file
#
# @retval PpiValue    if the CName is found in any given package
# @retval None        if the CName is not found in any of the given packages
#
def PpiValue(CName, PackageList, Inffile = None):
for P in PackageList:
PpiKeys = P.Ppis.keys()
if Inffile and P._PrivatePpis:
if not Inffile.startswith(P.MetaFile.Dir):
PpiKeys = (dict.fromkeys(x for x in P.Ppis if x not in P._PrivatePpis)).keys()
if CName in PpiKeys:
return P.Ppis[CName]
return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like the following:
#
#       ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. ${BEGIN} and ${END} may be
# omitted; in that case, the "placeholder_name" must not be a list and it
# will just be replaced once.
#
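# Illustrative usage sketch (placeholder name and values are hypothetical):
#
#   Tmpl = TemplateString()
#   Tmpl.Append('${BEGIN}LIB = ${Lib}\n${END}', {'Lib': ['LibA', 'LibB']})
#   str(Tmpl)    # -> 'LIB = LibA\nLIB = LibB\n'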
class TemplateString(object):
_REPEAT_START_FLAG = "BEGIN"
_REPEAT_END_FLAG = "END"
class Section(object):
_LIST_TYPES = [type([]), type(set()), type((0,))]
def __init__(self, TemplateSection, PlaceHolderList):
self._Template = TemplateSection
self._PlaceHolderList = []
# Split the section into sub-sections according to the position of placeholders
if PlaceHolderList:
self._SubSectionList = []
SubSectionStart = 0
#
# The placeholders passed in must be in the format of
#
# PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
#
for PlaceHolder, Start, End in PlaceHolderList:
self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
self._SubSectionList.append(TemplateSection[Start:End])
self._PlaceHolderList.append(PlaceHolder)
SubSectionStart = End
if SubSectionStart < len(TemplateSection):
self._SubSectionList.append(TemplateSection[SubSectionStart:])
else:
self._SubSectionList = [TemplateSection]
def __str__(self):
return self._Template + " : " + str(self._PlaceHolderList)
def Instantiate(self, PlaceHolderValues):
RepeatTime = -1
RepeatPlaceHolders = {}
NonRepeatPlaceHolders = {}
for PlaceHolder in self._PlaceHolderList:
if PlaceHolder not in PlaceHolderValues:
continue
Value = PlaceHolderValues[PlaceHolder]
if type(Value) in self._LIST_TYPES:
if RepeatTime < 0:
RepeatTime = len(Value)
elif RepeatTime != len(Value):
EdkLogger.error(
"TemplateString",
PARAMETER_INVALID,
"${%s} has different repeat time from others!" % PlaceHolder,
ExtraData=str(self._Template)
)
RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
else:
NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
if NonRepeatPlaceHolders:
StringList = []
for S in self._SubSectionList:
if S not in NonRepeatPlaceHolders:
StringList.append(S)
else:
StringList.append(str(NonRepeatPlaceHolders[S]))
else:
StringList = self._SubSectionList
if RepeatPlaceHolders:
TempStringList = []
for Index in range(RepeatTime):
for S in StringList:
if S not in RepeatPlaceHolders:
TempStringList.append(S)
else:
TempStringList.append(str(RepeatPlaceHolders[S][Index]))
StringList = TempStringList
return "".join(StringList)
## Constructor
def __init__(self, Template=None):
self.String = ''
self.IsBinary = False
self._Template = Template
self._TemplateSectionList = self._Parse(Template)
## str() operator
#
# @retval string The string replaced
#
def __str__(self):
return self.String
## Split the template string into fragments per the ${BEGIN} and ${END} flags
#
# @retval list A list of TemplateString.Section objects
#
def _Parse(self, Template):
SectionStart = 0
SearchFrom = 0
MatchEnd = 0
PlaceHolderList = []
TemplateSectionList = []
while Template:
MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
if not MatchObj:
if MatchEnd <= len(Template):
TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
break
MatchString = MatchObj.group(1)
MatchStart = MatchObj.start()
MatchEnd = MatchObj.end()
if MatchString == self._REPEAT_START_FLAG:
if MatchStart > SectionStart:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
elif MatchString == self._REPEAT_END_FLAG:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
else:
PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
SearchFrom = MatchEnd
return TemplateSectionList
    ## Replace the string template with a dictionary of placeholders and append the result to the existing string
#
# @param AppendString The string template to append
# @param Dictionary The placeholder dictionaries
#
def Append(self, AppendString, Dictionary=None):
if Dictionary:
SectionList = self._Parse(AppendString)
self.String += "".join([S.Instantiate(Dictionary) for S in SectionList])
else:
self.String += AppendString
    ## Replace the string template with a dictionary of placeholders
#
# @param Dictionary The placeholder dictionaries
#
# @retval str The string replaced with placeholder values
#
def Replace(self, Dictionary=None):
return "".join([S.Instantiate(Dictionary) for S in self._TemplateSectionList])
## Progress indicator class
#
# This class makes use of a thread to print progress on the console.
#
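# Illustrative usage sketch (messages are hypothetical): one ProgressChar is
# printed per Interval while a long-running step executes.
#
#   Prog = Progressor('Processing', 'done!')
#   Prog.Start()
#   ...                                      # long-running work here
#   Prog.Stop()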
class Progressor:
    # for avoiding an infinite loop
_StopFlag = None
_ProgressThread = None
_CheckInterval = 0.25
## Constructor
#
    # @param OpenMessage  The string printed before the progress characters
    # @param CloseMessage The string printed after the progress characters
    # @param ProgressChar The character used to indicate progress
    # @param Interval     The interval in seconds between two progress characters
#
def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
self.PromptMessage = OpenMessage
self.CodaMessage = CloseMessage
self.ProgressChar = ProgressChar
self.Interval = Interval
if Progressor._StopFlag == None:
Progressor._StopFlag = threading.Event()
    ## Start printing the progress character
    #
    # @param OpenMessage The string printed before the progress characters
#
def Start(self, OpenMessage=None):
if OpenMessage != None:
self.PromptMessage = OpenMessage
Progressor._StopFlag.clear()
if Progressor._ProgressThread == None:
Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
Progressor._ProgressThread.setDaemon(False)
Progressor._ProgressThread.start()
    ## Stop printing the progress character
    #
    # @param CloseMessage The string printed after the progress characters
#
def Stop(self, CloseMessage=None):
OriginalCodaMessage = self.CodaMessage
if CloseMessage != None:
self.CodaMessage = CloseMessage
self.Abort()
self.CodaMessage = OriginalCodaMessage
## Thread entry method
def _ProgressThreadEntry(self):
sys.stdout.write(self.PromptMessage + " ")
sys.stdout.flush()
TimeUp = 0.0
while not Progressor._StopFlag.isSet():
if TimeUp <= 0.0:
sys.stdout.write(self.ProgressChar)
sys.stdout.flush()
TimeUp = self.Interval
time.sleep(self._CheckInterval)
TimeUp -= self._CheckInterval
sys.stdout.write(" " + self.CodaMessage + "\n")
sys.stdout.flush()
## Abort the progress display
@staticmethod
def Abort():
if Progressor._StopFlag != None:
Progressor._StopFlag.set()
if Progressor._ProgressThread != None:
Progressor._ProgressThread.join()
Progressor._ProgressThread = None
## A dict which can access its keys and/or values orderly
#
# The class implements a new kind of dict whose keys and values can be
# accessed in the order they are added into the dict. It guarantees the order
# by keeping a copy of the keys in an internal list.
#
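# Illustrative usage sketch: unlike a plain dict on this Python version,
# iteration follows insertion order.
#
#   d = sdict()
#   d['PcdB'] = 1
#   d['PcdA'] = 2
#   d.keys()     # -> ['PcdB', 'PcdA'], not sorted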
class sdict(IterableUserDict):
## Constructor
def __init__(self):
IterableUserDict.__init__(self)
self._key_list = []
## [] operator
def __setitem__(self, key, value):
if key not in self._key_list:
self._key_list.append(key)
IterableUserDict.__setitem__(self, key, value)
## del operator
def __delitem__(self, key):
self._key_list.remove(key)
IterableUserDict.__delitem__(self, key)
## used in "for k in dict" loop to ensure the correct order
def __iter__(self):
return self.iterkeys()
## len() support
def __len__(self):
return len(self._key_list)
## "in" test support
def __contains__(self, key):
return key in self._key_list
## indexof support
def index(self, key):
return self._key_list.index(key)
## insert support
def insert(self, key, newkey, newvalue, order):
index = self._key_list.index(key)
if order == 'BEFORE':
self._key_list.insert(index, newkey)
IterableUserDict.__setitem__(self, newkey, newvalue)
elif order == 'AFTER':
self._key_list.insert(index + 1, newkey)
IterableUserDict.__setitem__(self, newkey, newvalue)
## append support
def append(self, sdict):
for key in sdict:
if key not in self._key_list:
self._key_list.append(key)
IterableUserDict.__setitem__(self, key, sdict[key])
def has_key(self, key):
return key in self._key_list
## Empty the dict
def clear(self):
self._key_list = []
IterableUserDict.clear(self)
## Return a copy of keys
def keys(self):
keys = []
for key in self._key_list:
keys.append(key)
return keys
## Return a copy of values
def values(self):
values = []
for key in self._key_list:
values.append(self[key])
return values
## Return a copy of (key, value) list
def items(self):
items = []
for key in self._key_list:
items.append((key, self[key]))
return items
## Iteration support
def iteritems(self):
return iter(self.items())
    ## Keys iteration support
    def iterkeys(self):
        return iter(self.keys())
    ## Values iteration support
    def itervalues(self):
        return iter(self.values())
## Return value related to a key, and remove the (key, value) from the dict
def pop(self, key, *dv):
value = None
if key in self._key_list:
value = self[key]
self.__delitem__(key)
elif len(dv) != 0 :
            value = dv[0]
return value
## Return (key, value) pair, and remove the (key, value) from the dict
def popitem(self):
key = self._key_list[-1]
value = self[key]
self.__delitem__(key)
return key, value
def update(self, dict=None, **kwargs):
if dict != None:
for k, v in dict.items():
self[k] = v
if len(kwargs):
for k, v in kwargs.items():
self[k] = v
## Dictionary with restricted keys
#
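# Illustrative usage sketch (keys are hypothetical): only keys fixed at
# construction time may be assigned; anything else raises a build error.
#
#   r = rdict(['ARCH', 'TARGET'])
#   r['ARCH'] = 'X64'    # allowed
#   r['FOO'] = '1'       # reports ATTRIBUTE_SET_FAILURE via EdkLogger.error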
class rdict(dict):
## Constructor
def __init__(self, KeyList):
for Key in KeyList:
dict.__setitem__(self, Key, "")
## []= operator
def __setitem__(self, key, value):
if key not in self:
EdkLogger.error("RestrictedDict", ATTRIBUTE_SET_FAILURE, "Key [%s] is not allowed" % key,
ExtraData=", ".join(dict.keys(self)))
dict.__setitem__(self, key, value)
## =[] operator
def __getitem__(self, key):
if key not in self:
return ""
return dict.__getitem__(self, key)
## del operator
def __delitem__(self, key):
EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="del")
## Empty the dict
def clear(self):
for Key in self:
self.__setitem__(Key, "")
## Return value related to a key, and remove the (key, value) from the dict
def pop(self, key, *dv):
EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="pop")
## Return (key, value) pair, and remove the (key, value) from the dict
def popitem(self):
EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="popitem")
## Dictionary using prioritized list as key
#
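# Illustrative usage sketch (keys are hypothetical): a two-level tdict in
# single-value mode falls back to the COMMON wildcard when an exact
# first-level key is missing.
#
#   t = tdict(True, 2)
#   t['COMMON', 'PcdFoo'] = '1'
#   t['IA32', 'PcdFoo']      # -> '1', resolved through the wildcard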
class tdict:
_ListType = type([])
_TupleType = type(())
_Wildcard = 'COMMON'
_ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', '*', 'PLATFORM']
def __init__(self, _Single_=False, _Level_=2):
self._Level_ = _Level_
self.data = {}
self._Single_ = _Single_
# =[] operator
def __getitem__(self, key):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
elif self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey == None or str(FirstKey).upper() in self._ValidWildcardList:
FirstKey = self._Wildcard
if self._Single_:
return self._GetSingleValue(FirstKey, RestKeys)
else:
return self._GetAllValues(FirstKey, RestKeys)
def _GetSingleValue(self, FirstKey, RestKeys):
Value = None
#print "%s-%s" % (FirstKey, self._Level_) ,
if self._Level_ > 1:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value == None:
for Key in self.data:
Value = self.data[Key][RestKeys]
if Value != None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value == None and self._Wildcard in self.data:
#print "Value=None"
Value = self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey]
if Value == None:
for Key in self.data:
Value = self.data[Key]
if Value != None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey]
elif self._Wildcard in self.data:
Value = self.data[self._Wildcard]
return Value
def _GetAllValues(self, FirstKey, RestKeys):
Value = []
if self._Level_ > 1:
if FirstKey == self._Wildcard:
for Key in self.data:
Value += self.data[Key][RestKeys]
else:
if FirstKey in self.data:
Value += self.data[FirstKey][RestKeys]
if self._Wildcard in self.data:
Value += self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
for Key in self.data:
Value.append(self.data[Key])
else:
if FirstKey in self.data:
Value.append(self.data[FirstKey])
if self._Wildcard in self.data:
Value.append(self.data[self._Wildcard])
return Value
## []= operator
def __setitem__(self, key, value):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
else:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey in self._ValidWildcardList:
FirstKey = self._Wildcard
if FirstKey not in self.data and self._Level_ > 0:
self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
if self._Level_ > 1:
self.data[FirstKey][RestKeys] = value
else:
self.data[FirstKey] = value
def SetGreedyMode(self):
self._Single_ = False
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetGreedyMode()
def SetSingleMode(self):
self._Single_ = True
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetSingleMode()
def GetKeys(self, KeyIndex=0):
assert KeyIndex >= 0
if KeyIndex == 0:
return set(self.data.keys())
else:
keys = set()
for Key in self.data:
keys |= self.data[Key].GetKeys(KeyIndex - 1)
return keys
## Boolean chain list
#
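# Illustrative usage sketch: the Result property ANDs all items together, and
# assigned items are coerced to booleans.
#
#   b = Blist([True, True])
#   b.Result     # -> True
#   b[1] = 0     # stored as False
#   b.Result     # -> False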
class Blist(UserList):
def __init__(self, initlist=None):
UserList.__init__(self, initlist)
def __setitem__(self, i, item):
if item not in [True, False]:
if item == 0:
item = False
else:
item = True
self.data[i] = item
def _GetResult(self):
Value = True
for item in self.data:
Value &= item
return Value
Result = property(_GetResult)
def ParseConsoleLog(Filename):
Opr = open(os.path.normpath(Filename), 'r')
Opw = open(os.path.normpath(Filename + '.New'), 'w+')
for Line in Opr.readlines():
if Line.find('.efi') > -1:
Line = Line[Line.rfind(' ') : Line.rfind('.efi')].strip()
Opw.write('%s\n' % Line)
Opr.close()
Opw.close()
def AnalyzePcdExpression(Setting):
Setting = Setting.strip()
# There might be escaped quote in a string: \", \\\"
Data = Setting.replace('\\\\', '//').replace('\\\"', '\\\'')
    # There might be '|' inside a string or inside parentheses ( ... | ... ); replace it with '-'
NewStr = ''
InStr = False
Pair = 0
for ch in Data:
if ch == '"':
InStr = not InStr
elif ch == '(' and not InStr:
Pair += 1
elif ch == ')' and not InStr:
Pair -= 1
if (Pair > 0 or InStr) and ch == TAB_VALUE_SPLIT:
NewStr += '-'
else:
NewStr += ch
FieldList = []
StartPos = 0
while True:
Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
if Pos < 0:
FieldList.append(Setting[StartPos:].strip())
break
FieldList.append(Setting[StartPos:Pos].strip())
StartPos = Pos + 1
return FieldList
## AnalyzeDscPcd
#
# Analyze DSC PCD value, since there is no data type info in DSC
# This function is used to match the functions (AnalyzePcdData, AnalyzeHiiPcdData, AnalyzeVpdPcdData) used for retrieving PCD value from database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch: TokenSpace.PcdCName|PcdValue[|MaxSize]
# 3. Dynamic default:
#    TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
#    TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
#    TokenSpace.PcdCName|VpdOffset[|VpdValue]
#    TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
#    TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
# The PCD value needs to be located in such a string. The PCD value might be an expression
# in which the "|" operator may appear; "|" may also appear inside a string value.
#
# @param Setting:  String containing the information described above, with "TokenSpace.PcdCName|" stripped
# @param PcdType:  PCD type: feature, fixed, dynamic default, VPD, HII
# @param DataType: The datum type of the PCD: VOID*, UINT, BOOLEAN
# @retval:
#   ValueList: A list containing the fields described above
#   IsValid:   True if the setting conforms to the formats above, otherwise False
#   Index:     The index of PcdValue in ValueList
#
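# Illustrative usage sketch (values are hypothetical; MODEL_PCD_DYNAMIC_DEFAULT
# comes from CommonDataClass.DataClass):
#
#   AnalyzeDscPcd('L"Flash"|VOID*|10', MODEL_PCD_DYNAMIC_DEFAULT, 'VOID*')
#   # -> (['L"Flash"', 'VOID*', '10'], True, 0)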
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
FieldList = AnalyzePcdExpression(Setting)
IsValid = True
if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_FEATURE_FLAG):
Value = FieldList[0]
Size = ''
if len(FieldList) > 1:
Type = FieldList[1]
# Fix the PCD type when no DataType input
if Type == 'VOID*':
DataType = 'VOID*'
else:
Size = FieldList[1]
if len(FieldList) > 2:
Size = FieldList[2]
if DataType == 'VOID*':
IsValid = (len(FieldList) <= 3)
else:
IsValid = (len(FieldList) <= 1)
return [Value, '', Size], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
Value = FieldList[0]
Size = Type = ''
if len(FieldList) > 1:
Type = FieldList[1]
else:
Type = DataType
if len(FieldList) > 2:
Size = FieldList[2]
else:
if Type == 'VOID*':
if Value.startswith("L"):
Size = str((len(Value)- 3 + 1) * 2)
elif Value.startswith("{"):
Size = str(len(Value.split(",")))
else:
Size = str(len(Value) -2 + 1 )
if DataType == 'VOID*':
IsValid = (len(FieldList) <= 3)
else:
IsValid = (len(FieldList) <= 1)
return [Value, Type, Size], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
VpdOffset = FieldList[0]
Value = Size = ''
if not DataType == 'VOID*':
if len(FieldList) > 1:
Value = FieldList[1]
else:
if len(FieldList) > 1:
Size = FieldList[1]
if len(FieldList) > 2:
Value = FieldList[2]
if DataType == 'VOID*':
IsValid = (len(FieldList) <= 3)
else:
IsValid = (len(FieldList) <= 2)
return [VpdOffset, Size, Value], IsValid, 2
elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
HiiString = FieldList[0]
Guid = Offset = Value = Attribute = ''
if len(FieldList) > 1:
Guid = FieldList[1]
if len(FieldList) > 2:
Offset = FieldList[2]
if len(FieldList) > 3:
Value = FieldList[3]
if len(FieldList) > 4:
Attribute = FieldList[4]
IsValid = (3 <= len(FieldList) <= 5)
return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
return [], False, 0
## AnalyzePcdData
#
# Analyze the PCD Value, Datum type and TokenNumber.
# Used to avoid split issues when the value string contains the "|" character.
#
# @param[in] Setting: A String containing value/datum type/token number information;
#
# @retval ValueList: A List containing value, datum type and token number.
#
def AnalyzePcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[0] = PtrValue[0]
return ValueList
## AnalyzeHiiPcdData
#
# Analyze the PCD Value, variable name, variable GUID and variable offset.
# Used to avoid split issues when the value string contains the "|" character.
#
# @param[in] Setting: A String containing VariableName, VariableGuid, VariableOffset, DefaultValue information;
#
# @retval ValueList: A List containing VariableName, VariableGuid, VariableOffset, DefaultValue.
#
def AnalyzeHiiPcdData(Setting):
ValueList = ['', '', '', '']
TokenList = GetSplitValueList(Setting)
ValueList[0:len(TokenList)] = TokenList
return ValueList
## AnalyzeVpdPcdData
#
# Analyze the VPD PCD VpdOffset, MaxDatumSize and InitialValue.
# Used to avoid split issues when the value string contains the "|" character.
#
# @param[in] Setting: A String containing VpdOffset/MaxDatumSize/InitialValue information;
#
# @retval ValueList: A List containing VpdOffset, MaxDatumSize and InitialValue.
#
def AnalyzeVpdPcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'\s*L?\".*\|.*\"\s*$')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[2] = PtrValue[0]
return ValueList
## Check the format of a PCD value against its datum type
#
# For PCD value setting
#
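# Illustrative usage sketch (values are hypothetical; 'UINT8' is assumed to
# match the TAB_UINT8 constant from DataType):
#
#   CheckPcdDatum('UINT8', '0xFF')      # -> (True, "")
#   CheckPcdDatum('BOOLEAN', 'Yes')     # -> (False, "Invalid value [Yes] ...")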
def CheckPcdDatum(Type, Value):
if Type == "VOID*":
ValueRe = re.compile(r'\s*L?\".*\"\s*$')
if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
or (Value.startswith('{') and Value.endswith('}'))
):
return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
", or \"...\" for string, or L\"...\" for unicode string" % (Value, Type)
elif ValueRe.match(Value):
# Check the chars in UnicodeString or CString is printable
if Value.startswith("L"):
Value = Value[2:-1]
else:
Value = Value[1:-1]
Printset = set(string.printable)
Printset.remove(TAB_PRINTCHAR_VT)
Printset.add(TAB_PRINTCHAR_BS)
Printset.add(TAB_PRINTCHAR_NUL)
if not set(Value).issubset(Printset):
PrintList = list(Printset)
PrintList.sort()
return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
elif Type == 'BOOLEAN':
if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
try:
Value = long(Value, 0)
except:
return False, "Invalid value [%s] of type [%s];"\
" must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
else:
return False, "Invalid type [%s]; must be one of VOID*, BOOLEAN, UINT8, UINT16, UINT32, UINT64." % (Type)
return True, ""
## Split command line option string to list
#
# subprocess.Popen needs the args to be a sequence. Otherwise there are
# problems launching the command on non-Windows platforms.
#
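# Illustrative usage sketch: options are split at '/' or '-' characters that
# follow whitespace, so values attached to an option stay with it.
#
#   SplitOption('/nologo /E /TC')    # -> ['/nologo', '/E', '/TC']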
def SplitOption(OptionString):
OptionList = []
LastChar = " "
OptionStart = 0
QuotationMark = ""
for Index in range(0, len(OptionString)):
CurrentChar = OptionString[Index]
if CurrentChar in ['"', "'"]:
if QuotationMark == CurrentChar:
QuotationMark = ""
elif QuotationMark == "":
QuotationMark = CurrentChar
continue
elif QuotationMark:
continue
if CurrentChar in ["/", "-"] and LastChar in [" ", "\t", "\r", "\n"]:
if Index > OptionStart:
OptionList.append(OptionString[OptionStart:Index - 1])
OptionStart = Index
LastChar = CurrentChar
OptionList.append(OptionString[OptionStart:])
return OptionList
def CommonPath(PathList):
P1 = min(PathList).split(os.path.sep)
P2 = max(PathList).split(os.path.sep)
for Index in xrange(min(len(P1), len(P2))):
if P1[Index] != P2[Index]:
return os.path.sep.join(P1[:Index])
return os.path.sep.join(P1)
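# Illustrative usage sketch (POSIX-style paths, hypothetical): the common
# prefix is computed component-wise on the lexicographic min/max of the list.
#
#   CommonPath(['/ws/PkgA/A.inf', '/ws/PkgB/B.inf'])    # -> '/ws'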
#
# Convert string to C format array
#
def ConvertStringToByteArray(Value):
Value = Value.strip()
if not Value:
return None
if Value[0] == '{':
if not Value.endswith('}'):
return None
Value = Value.replace(' ', '').replace('{', '').replace('}', '')
ValFields = Value.split(',')
try:
for Index in range(len(ValFields)):
ValFields[Index] = str(int(ValFields[Index], 0))
except ValueError:
return None
Value = '{' + ','.join(ValFields) + '}'
return Value
Unicode = False
if Value.startswith('L"'):
if not Value.endswith('"'):
return None
Value = Value[1:]
Unicode = True
elif not Value.startswith('"') or not Value.endswith('"'):
return None
Value = eval(Value) # translate escape character
NewValue = '{'
for Index in range(0,len(Value)):
if Unicode:
NewValue = NewValue + str(ord(Value[Index]) % 0x10000) + ','
else:
NewValue = NewValue + str(ord(Value[Index]) % 0x100) + ','
Value = NewValue + '0}'
return Value
class PathClass(object):
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
self.Arch = Arch
self.File = str(File)
if os.path.isabs(self.File):
self.Root = ''
self.AlterRoot = ''
else:
self.Root = str(Root)
self.AlterRoot = str(AlterRoot)
# Remove any '.' and '..' in path
if self.Root:
self.Root = mws.getWs(self.Root, self.File)
self.Path = os.path.normpath(os.path.join(self.Root, self.File))
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
# eliminate the side-effect of 'C:'
if self.Root[-1] == ':':
self.Root += os.path.sep
# file path should not start with path separator
if self.Root[-1] == os.path.sep:
self.File = self.Path[len(self.Root):]
else:
self.File = self.Path[len(self.Root) + 1:]
else:
self.Path = os.path.normpath(self.File)
self.SubDir, self.Name = os.path.split(self.File)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.Root:
if self.SubDir:
self.Dir = os.path.join(self.Root, self.SubDir)
else:
self.Dir = self.Root
else:
self.Dir = self.SubDir
if IsBinary:
self.Type = Type
else:
self.Type = self.Ext.lower()
self.IsBinary = IsBinary
self.Target = Target
self.TagName = TagName
self.ToolCode = ToolCode
self.ToolChainFamily = ToolChainFamily
self._Key = None
## Convert the object of this class to a string
#
# Convert member Path of the class to a string
#
# @retval string Formatted String
#
def __str__(self):
return self.Path
## Override __eq__ function
#
    # Check whether two PathClass instances are the same
    #
    # @retval False The two PathClass instances are different
    # @retval True  The two PathClass instances are the same
#
def __eq__(self, Other):
if type(Other) == type(self):
return self.Path == Other.Path
else:
return self.Path == str(Other)
## Override __cmp__ function
#
    # Customize the comparison operation of two PathClass instances
    #
    # @retval 0  The two PathClass instances are equal
    # @retval -1 The first PathClass is less than the second PathClass
    # @retval 1  The first PathClass is greater than the second PathClass
def __cmp__(self, Other):
if type(Other) == type(self):
OtherKey = Other.Path
else:
OtherKey = str(Other)
SelfKey = self.Path
if SelfKey == OtherKey:
return 0
elif SelfKey > OtherKey:
return 1
else:
return -1
## Override __hash__ function
#
# Use Path as key in hash table
#
# @retval string Key for hash table
#
def __hash__(self):
return hash(self.Path)
def _GetFileKey(self):
if self._Key == None:
self._Key = self.Path.upper() # + self.ToolChainFamily + self.TagName + self.ToolCode + self.Target
return self._Key
def _GetTimeStamp(self):
return os.stat(self.Path)[8]
def Validate(self, Type='', CaseSensitive=True):
if GlobalData.gCaseInsensitive:
CaseSensitive = False
if Type and Type.lower() != self.Type:
return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
if not RealRoot and not RealFile:
RealFile = self.File
if self.AlterRoot:
RealFile = os.path.join(self.AlterRoot, self.File)
elif self.Root:
RealFile = os.path.join(self.Root, self.File)
if len (mws.getPkgPath()) == 0:
return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
else:
return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
ErrorCode = 0
ErrorInfo = ''
if RealRoot != self.Root or RealFile != self.File:
if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
ErrorCode = FILE_CASE_MISMATCH
ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
self.SubDir, self.Name = os.path.split(RealFile)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.SubDir:
self.Dir = os.path.join(RealRoot, self.SubDir)
else:
self.Dir = RealRoot
self.File = RealFile
self.Root = RealRoot
self.Path = os.path.join(RealRoot, RealFile)
return ErrorCode, ErrorInfo
Key = property(_GetFileKey)
TimeStamp = property(_GetTimeStamp)
## Parse a PE image to get the required PE information.
#
class PeImageClass():
## Constructor
#
    # @param PeFile The file path of the PE image
#
def __init__(self, PeFile):
self.FileName = PeFile
self.IsValid = False
self.Size = 0
self.EntryPoint = 0
self.SectionAlignment = 0
self.SectionHeaderList = []
self.ErrorInfo = ''
try:
PeObject = open(PeFile, 'rb')
except:
            self.ErrorInfo = self.FileName + ' cannot be found\n'
return
# Read DOS header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x3E)
ByteList = ByteArray.tolist()
# DOS signature should be 'MZ'
if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
return
# Read 4 byte PE Signature
PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
PeObject.seek(PeOffset)
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 4)
# PE signature should be 'PE\0\0'
if ByteArray.tostring() != 'PE\0\0':
self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
return
# Read PE file header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x14)
ByteList = ByteArray.tolist()
SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
if SecNumber == 0:
self.ErrorInfo = self.FileName + ' has no section header'
return
# Read PE optional header
OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, OptionalHeaderSize)
ByteList = ByteArray.tolist()
self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
# Read each Section Header
for Index in range(SecNumber):
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x28)
ByteList = ByteArray.tolist()
SecName = self._ByteListToStr(ByteList[0:8])
SecVirtualSize = self._ByteListToInt(ByteList[8:12])
SecRawAddress = self._ByteListToInt(ByteList[20:24])
SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
self.IsValid = True
PeObject.close()
def _ByteListToStr(self, ByteList):
String = ''
for index in range(len(ByteList)):
if ByteList[index] == 0:
break
String += chr(ByteList[index])
return String
def _ByteListToInt(self, ByteList):
Value = 0
for index in range(len(ByteList) - 1, -1, -1):
Value = (Value << 8) | int(ByteList[index])
return Value
class SkuClass():
DEFAULT = 0
SINGLE = 1
MULTIPLE =2
def __init__(self,SkuIdentifier='', SkuIds={}):
self.AvailableSkuIds = sdict()
self.SkuIdSet = []
self.SkuIdNumberSet = []
if SkuIdentifier == '' or SkuIdentifier is None:
self.SkuIdSet = ['DEFAULT']
self.SkuIdNumberSet = ['0U']
elif SkuIdentifier == 'ALL':
self.SkuIdSet = SkuIds.keys()
self.SkuIdNumberSet = [num.strip() + 'U' for num in SkuIds.values()]
else:
r = SkuIdentifier.split('|')
self.SkuIdSet = [x.strip() for x in r]
k = None
try:
self.SkuIdNumberSet = [SkuIds[k].strip() + 'U' for k in self.SkuIdSet]
except Exception:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (k, " | ".join(SkuIds.keys())))
if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet and SkuIdentifier != 'ALL':
self.SkuIdSet.remove('DEFAULT')
self.SkuIdNumberSet.remove('0U')
for each in self.SkuIdSet:
if each in SkuIds:
self.AvailableSkuIds[each] = SkuIds[each]
else:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (each, " | ".join(SkuIds.keys())))
def __SkuUsageType(self):
if len(self.SkuIdSet) == 1:
if self.SkuIdSet[0] == 'DEFAULT':
return SkuClass.DEFAULT
else:
return SkuClass.SINGLE
else:
return SkuClass.MULTIPLE
def __GetAvailableSkuIds(self):
return self.AvailableSkuIds
def __GetSystemSkuID(self):
if self.__SkuUsageType() == SkuClass.SINGLE:
return self.SkuIdSet[0]
else:
return 'DEFAULT'
def __GetAvailableSkuIdNumber(self):
return self.SkuIdNumberSet
SystemSkuId = property(__GetSystemSkuID)
AvailableSkuIdSet = property(__GetAvailableSkuIds)
SkuUsageType = property(__SkuUsageType)
AvailableSkuIdNumSet = property(__GetAvailableSkuIdNumber)
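# Illustrative usage of SkuClass (a minimal sketch; the SKU names and numbers
# are made up):
#
#   SkuIds = {'DEFAULT': '0', 'SKU_A': '1', 'SKU_B': '2'}
#   Sku = SkuClass('SKU_A|SKU_B', SkuIds)
#   Sku.SkuUsageType          # SkuClass.MULTIPLE
#   Sku.SystemSkuId           # 'DEFAULT' (only a single non-default SKU maps to itself)
#   Sku.AvailableSkuIdNumSet  # ['1U', '2U']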
#
# Pack a registry format GUID
#
def PackRegistryFormatGuid(Guid):
Guid = Guid.split('-')
return pack('=LHHBBBBBBBB',
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16)
)
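# Illustrative example (a minimal sketch): the '=LHHBBBBBBBB' layout packs a
# registry-format GUID into the 16-byte EFI_GUID wire format -- 4-byte Data1,
# 2-byte Data2, 2-byte Data3 (all native-endian), then the 8 Data4 bytes in
# the order they appear in the string:
#
#   Buffer = PackRegistryFormatGuid('12345678-1234-5678-90AB-CDEF01234567')
#   assert len(Buffer) == 16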
def BuildOptionPcdValueFormat(TokenSpaceGuidCName, TokenCName, PcdDatumType, Value):
if PcdDatumType == 'VOID*':
if Value.startswith('L'):
if len(Value) < 2:
EdkLogger.error("build", FORMAT_INVALID, 'For VOID* type PCDs, when specifying the value on the command line, please use one of the following formats: "string", L"string", H"{...}"')
Value = Value[0] + '"' + Value[1:] + '"'
elif Value.startswith('H'):
if len(Value) < 2:
EdkLogger.error("build", FORMAT_INVALID, 'For VOID* type PCDs, when specifying the value on the command line, please use one of the following formats: "string", L"string", H"{...}"')
Value = Value[1:]
else:
if not Value:
EdkLogger.error("build", FORMAT_INVALID, 'For VOID* type PCDs, when specifying the value on the command line, please use one of the following formats: "string", L"string", H"{...}"')
Value = '"' + Value + '"'
IsValid, Cause = CheckPcdDatum(PcdDatumType, Value)
if not IsValid:
EdkLogger.error("build", FORMAT_INVALID, Cause, ExtraData="%s.%s" % (TokenSpaceGuidCName, TokenCName))
if PcdDatumType == 'BOOLEAN':
Value = Value.upper()
if Value == 'TRUE' or Value == '1':
Value = '1'
elif Value == 'FALSE' or Value == '0':
Value = '0'
return Value
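# Illustrative inputs and outputs (a minimal sketch; token names are
# hypothetical and CheckPcdDatum is assumed to accept the values):
#
#   BuildOptionPcdValueFormat('gTsGuid', 'PcdFoo', 'VOID*', 'Lhello')   -> 'L"hello"'
#   BuildOptionPcdValueFormat('gTsGuid', 'PcdFoo', 'VOID*', 'H{0x01}')  -> '{0x01}'
#   BuildOptionPcdValueFormat('gTsGuid', 'PcdBar', 'BOOLEAN', 'TRUE')   -> '1'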
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
pass
|
dag_processing.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import signal
import sys
import time
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union, cast
from setproctitle import setproctitle # pylint: disable=no-name-in-module
from sqlalchemy import or_
from tabulate import tabulate
import airflow.models
from airflow.configuration import conf
from airflow.models import DagModel, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.settings import STORE_DAG_CODE
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, SlaCallbackRequest, TaskCallbackRequest
from airflow.utils.file import list_py_file_paths
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.process_utils import kill_child_processes_by_pids, reap_process_group
from airflow.utils.session import provide_session
from airflow.utils.state import State
class AbstractDagFileProcessorProcess(metaclass=ABCMeta):
"""Processes a DAG file. See SchedulerJob.process_file() for more details."""
@abstractmethod
def start(self) -> None:
"""Launch the process to process the file"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill: bool = False):
"""Terminate (and then kill) the process launched to process the file"""
raise NotImplementedError()
@abstractmethod
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
raise NotImplementedError()
@property
@abstractmethod
def pid(self) -> int:
""":return: the PID of the process launched to process the given file"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code.
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self) -> Optional[Tuple[int, int]]:
"""
The number of DAGs found and the number of import errors
:return: result of running SchedulerJob.process_file() if available. Otherwise, None
:rtype: Optional[Tuple[int, int]]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self) -> datetime:
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self) -> str:
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
@property
@abstractmethod
def waitable_handle(self):
"""A "waitable" handle that can be passed to ``multiprocessing.connection.wait()``"""
raise NotImplementedError()
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
file_paths: List[str]
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: Optional[datetime]
last_duration: Optional[float]
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = 'agent_run_once'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
in a subprocess, collect DAG parsing results from it and communicate
signal/DAG parsing stat with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: str
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: ([str, List[CallbackRequest], Optional[List[str]], bool]) -> (
AbstractDagFileProcessorProcess
)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
:type pickle_dags: bool
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_factory: Callable[
[str, List[CallbackRequest], Optional[List[str]], bool], AbstractDagFileProcessorProcess
],
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
):
super().__init__()
self._file_path_queue: List[str] = []
self._dag_directory: str = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: Dict[str, AbstractDagFileProcessorProcess] = {}
# Pipe for communicating signals
self._process: Optional[multiprocessing.process.BaseProcess] = None
self._done: bool = False
# Initialized as True so we do not deactivate without any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: Optional[MultiprocessingConnection] = None
self._last_parsing_stat_received_at: float = time.monotonic()
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
mp_start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(mp_start_method)
self._last_parsing_stat_received_at = time.monotonic()
self._parent_signal_conn, child_signal_conn = context.Pipe()
process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
# getattr prevents error while pickling an instance method.
getattr(self, "_processor_factory"),
self._processor_timeout,
child_signal_conn,
self._dag_ids,
self._pickle_dags,
self._async_mode,
),
)
self._process = process
process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", process.pid)
def run_single_parsing_loop(self) -> None:
"""
Should only be used when the DAG file processor manager was launched in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing.
"""
if not self._parent_signal_conn or not self._process:
raise ValueError("Process not started.")
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
except ConnectionError:
# If the manager died because of an error we will notice and restart it
# when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_callback_to_execute(self, request: CallbackRequest) -> None:
"""
Sends information about the callback to be executed by DagFileProcessor.
:param request: Callback request to be executed.
:type request: CallbackRequest
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
self._parent_signal_conn.send(request)
except ConnectionError:
# If the manager died because of an error we will notice and restart it
# when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_sla_callback_request_to_execute(self, full_filepath: str, dag_id: str) -> None:
"""
Sends information about the SLA callback to be executed by DagFileProcessor.
:param full_filepath: DAG File path
:type full_filepath: str
:param dag_id: DAG ID
:type dag_id: str
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
request = SlaCallbackRequest(full_filepath=full_filepath, dag_id=dag_id)
self._parent_signal_conn.send(request)
except ConnectionError:
# If the manager died because of an error we will notice and restart it
# when harvest_serialized_dags calls _heartbeat_manager.
pass
def wait_until_finished(self) -> None:
"""Waits until DAG parsing is finished."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
break
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode we don't send this message from the Manager
# until all the running processors have finished
self._sync_metadata(result)
return
@staticmethod
def _run_processor_manager(
dag_directory: str,
max_runs: int,
processor_factory: Callable[[str, List[CallbackRequest]], AbstractDagFileProcessorProcess],
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
) -> None:
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0])) # type: ignore
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(
dag_directory,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
dag_ids,
pickle_dags,
async_mode,
)
processor_manager.start()
def heartbeat(self) -> None:
"""Check if the DagFileProcessorManager process is alive, and process any pending messages"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
raise RuntimeError(f"Unexpected message received of type {type(message).__name__}")
def _heartbeat_manager(self):
"""Heartbeat DAG file processor and restart it if we are not done."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid,
self._process.exitcode,
)
self.start()
if self.done:
return
parsing_stat_age = time.monotonic() - self._last_parsing_stat_received_at
if parsing_stat_age > self._processor_timeout.total_seconds():
Stats.incr('dag_processing.manager_stalls')
self.log.error(
"DagFileProcessorManager (PID=%d) last sent a heartbeat %.2f seconds ago! Restarting it",
self._process.pid,
parsing_stat_age,
)
reap_process_group(self._process.pid, logger=self.log)
self.start()
def _sync_metadata(self, stat):
"""Sync metadata from stat queue and only keep the latest stat."""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._last_parsing_stat_received_at = time.monotonic()
@property
def done(self) -> bool:
"""Has DagFileProcessorManager ended?"""
return self._done
@property
def all_files_processed(self):
"""Have all files been processed at least once?"""
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
# Give the Manager some time to cleanly shut down, but not too long, as
# it's better to finish sooner than wait for (non-critical) work to
# finish
self._process.join(timeout=1.0)
reap_process_group(self._process.pid, logger=self.log)
self._parent_signal_conn.close()
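# Illustrative agent lifecycle (a minimal sketch; the real call sites live in
# SchedulerJob, and dag_directory/processor_factory are placeholders):
#
#   agent = DagFileProcessorAgent(
#       dag_directory='/files/dags',
#       max_runs=-1,
#       processor_factory=my_processor_factory,
#       processor_timeout=timedelta(seconds=600),
#       dag_ids=None,
#       pickle_dags=False,
#       async_mode=True,
#   )
#   agent.start()
#   while not agent.done:
#       agent.heartbeat()   # processes pending messages, restarts a dead/stalled manager
#       time.sleep(1)
#   agent.end()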
class DagFileProcessorManager(LoggingMixin): # pylint: disable=too-many-instance-attributes
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results to a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessorProcess)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: MultiprocessingConnection
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
:type pickle_dags: bool
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_factory: Callable[[str, List[CallbackRequest]], AbstractDagFileProcessorProcess],
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool = True,
):
super().__init__()
self._file_paths: List[str] = []
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: Optional[float] = None
self._parallelism = conf.getint('scheduler', 'parsing_processes')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
self.log.warning(
"Because we cannot use more than 1 thread (parsing_processes = "
"%d ) when using sqlite. So we set parallelism to 1.",
self._parallelism,
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler', 'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler', 'print_stats_interval')
# How many seconds to wait for a task heartbeat before marking it as a zombie.
self._zombie_threshold_secs = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
# Whether to store DAG file source code in the database.
self.store_dag_code = STORE_DAG_CODE
# Map from file path to the processor
self._processors: Dict[str, AbstractDagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
self._last_zombie_query_time = None
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = 0
# TODO: Remove magic number
self._zombie_query_interval = 10
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler', 'dag_dir_list_interval')
# Mapping file name and callbacks requests
self._callback_to_execute: Dict[str, List[CallbackRequest]] = defaultdict(list)
self._log = logging.getLogger('airflow.processor_manager')
self.waitables: Dict[Any, Union[MultiprocessingConnection, AbstractDagFileProcessorProcess]] = {
self._signal_conn: self._signal_conn,
}
def register_exit_signals(self):
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
# So that we ignore the debug dump signal, making it easier to send
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
def _exit_gracefully(self, signum, frame): # pylint: disable=unused-argument
"""Helper method to clean up DAG file processors to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.log.debug("Current Stacktrace is: %s", '\n'.join(map(str, inspect.stack())))
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.register_exit_signals()
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
return self._run_parsing_loop()
def _run_parsing_loop(self):
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
self._refresh_dag_dir()
self.prepare_file_path_queue()
if self._async_mode:
# If we're in async mode, we can start up straight away. If we're
# in sync mode we need to be told to start a "loop"
self.start_new_processes()
while True:
loop_start_time = time.monotonic()
# pylint: disable=no-else-break
ready = multiprocessing.connection.wait(self.waitables.keys(), timeout=poll_time)
if self._signal_conn in ready:
agent_signal = self._signal_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_RUN_ONCE:
# continue the loop to parse dags
pass
elif isinstance(agent_signal, CallbackRequest):
self._add_callback_to_queue(agent_signal)
else:
raise ValueError(f"Invalid message {type(agent_signal)}")
if not ready and not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB, which isn't a good practice).
# This shouldn't happen, as in sync mode poll should block
# forever. Let's be defensive about that.
self.log.warning(
"wait() unexpectedly returned nothing ready after infinite timeout (%r)!", poll_time
)
continue
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = self.waitables.get(sentinel)
if not processor:
continue
self._collect_results_from_processor(processor)
self.waitables.pop(sentinel)
self._processors.pop(processor.file_path)
self._refresh_dag_dir()
self._find_zombies() # pylint: disable=no-value-for-parameter
self._kill_timed_out_processors()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
self.start_new_processes()
# Update number of loop iteration.
self._num_run += 1
if not self._async_mode:
self.log.debug("Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
self.collect_results()
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(
self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info(
"Exiting dag parsing loop as all files have been processed %s times", self._max_runs
)
break
if self._async_mode:
loop_duration = time.monotonic() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
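# Loop pacing note (explanatory): in async mode the next wait() timeout is
# padded so each iteration takes roughly one second, e.g. an iteration that
# did 0.2s of work leads to poll_time = 0.8, while an iteration that took
# longer than a second polls again immediately (poll_time = 0.0).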
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
# Callback has a higher priority over DAG Run scheduling
if request.full_filepath in self._file_path_queue:
# Remove file paths matching request.full_filepath from self._file_path_queue
# Since we are already going to use that filepath to run callback,
# there is no need to have same file path again in the queue
self._file_path_queue = [
file_path for file_path in self._file_path_queue if file_path != request.full_filepath
]
self._file_path_queue.insert(0, request.full_filepath)
def _refresh_dag_dir(self):
"""Refresh file paths from dag dir if we haven't done it for too long."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors() # pylint: disable=no-value-for-parameter
except Exception: # noqa pylint: disable=broad-except
self.log.exception("Error removing old import errors")
SerializedDagModel.remove_deleted_dags(self._file_paths)
DagModel.deactivate_deleted_dags(self._file_paths)
if self.store_dag_code:
from airflow.models.dagcode import DagCode
DagCode.remove_deleted_code(self._file_paths)
def _print_stat(self):
"""Occasionally print out stats about how fast the files are getting processed"""
if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = time.monotonic()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(~errors.ImportError.filename.in_(self._file_paths))
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path", "PID", "Runtime", "# DAGs", "# Errors", "Last Runtime", "Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = (now - processor_start_time) if processor_start_time else None
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge(f'dag_processing.last_run.seconds_ago.{file_name}', seconds_ago)
if runtime:
Stats.timing(f'dag_processing.last_duration.{file_name}', runtime)
rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append(
(
file_path,
pid,
f"{runtime.total_seconds():.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def _collect_results_from_processor(self, processor) -> None:
self.log.debug("Processor for %s finished", processor.file_path)
Stats.decr('dag_processing.processes')
last_finish_time = timezone.utcnow()
if processor.result is not None:
num_dags, count_import_errors = processor.result
else:
self.log.error(
"Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
)
count_import_errors = -1
num_dags = 0
stat = DagFileStat(
num_dags=num_dags,
import_errors=count_import_errors,
last_finish_time=last_finish_time,
last_duration=(last_finish_time - processor.start_time).total_seconds(),
run_count=self.get_run_count(processor.file_path) + 1,
)
self._file_stats[processor.file_path] = stat
def collect_results(self) -> None:
"""Collect the result from any finished DAG processors"""
ready = multiprocessing.connection.wait(self.waitables.keys() - [self._signal_conn], timeout=0)
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = cast(AbstractDagFileProcessorProcess, self.waitables[sentinel])
self.waitables.pop(processor.waitable_handle)
self._processors.pop(processor.file_path)
self._collect_results_from_processor(processor)
self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
# Avoid creating a duplicate processor, i.e. a processor for the same file path
if file_path in self._processors.keys():
continue
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._processor_factory(
file_path, callback_to_execute_for_file, self._dag_ids, self._pickle_dags
)
del self._callback_to_execute[file_path]
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
def prepare_file_path_queue(self):
"""Generate more file paths to process. Result are saved in _file_path_queue."""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (
last_finish_time is not None
and (now - last_finish_time).total_seconds() < self._file_process_interval
):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [
file_path for file_path, stat in self._file_stats.items() if stat.run_count == self._max_runs
]
files_paths_to_queue = list(
set(self._file_paths)
- set(file_paths_in_progress)
- set(file_paths_recently_processed)
- set(files_paths_at_run_limit)
)
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path,
processor.start_time.isoformat(),
)
self.log.debug("Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue))
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(
num_dags=0, import_errors=0, last_finish_time=None, last_duration=None, run_count=0
)
self._file_path_queue.extend(files_paths_to_queue)
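# Illustrative queueing decision (a minimal sketch with made-up paths): with
# _file_paths = [a.py, b.py, c.py], a.py currently in _processors, and b.py
# finished 5s ago while _file_process_interval is 30, the set difference
# above leaves files_paths_to_queue == ['c.py'].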
@provide_session
def _find_zombies(self, session):
"""
Find zombie task instances, which are tasks that haven't heartbeated
for too long, and update the current zombie list.
"""
now = timezone.utcnow()
if (
not self._last_zombie_query_time
or (now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval
):
# to avoid circular imports
from airflow.jobs.local_task_job import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
DM = airflow.models.DagModel
limit_dttm = timezone.utcnow() - timedelta(seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
zombies = (
session.query(TI, DM.fileloc)
.join(LJ, TI.job_id == LJ.id)
.join(DM, TI.dag_id == DM.dag_id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
)
.all()
)
self._last_zombie_query_time = timezone.utcnow()
for ti, file_loc in zombies:
request = TaskCallbackRequest(
full_filepath=file_loc,
simple_task_instance=SimpleTaskInstance(ti),
msg="Detected as zombie",
)
self.log.info("Detected zombie job: %s", request)
self._add_callback_to_queue(request)
Stats.incr('zombies_killed')
def _kill_timed_out_processors(self):
"""Kill any file processors that timeout to defend against process hangs."""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, killing it.",
file_path,
processor.pid,
processor.start_time.isoformat(),
)
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
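# Explanatory note: with max_runs = 3, this returns True only once every
# tracked file has run_count >= 3 *and* the manager itself has completed at
# least 3 parsing loop iterations (_num_run >= 3).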
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them orphaned.
"""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
kill_child_processes_by_pids(pids_to_kill)
def emit_metrics(self):
"""
Emit metrics about dag parsing summary
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = time.perf_counter() - self._parsing_start_time
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge(
'dag_processing.import_errors', sum(stat.import_errors for stat in self._file_stats.values())
)
# pylint: disable=missing-docstring
@property
def file_paths(self):
return self._file_paths
|
mod_autoaim_extended308.py | # -*- coding: utf-8 -*-
import os
import re
import json
import codecs
import datetime
import threading
import urllib
import urllib2
import math
import BigWorld
import GUI
import Vehicle
import Math
from constants import AUTH_REALM
from Avatar import PlayerAvatar
from AvatarInputHandler import cameras
from BattleReplay import BattleReplay
from gui.Scaleform.daapi.view.lobby.hangar import Hangar
from tutorial.gui.Scaleform.battle import layout
from helpers import isPlayerAvatar
from gui.Scaleform.Battle import Battle
class Config(object):
def __init__(self):
self.enable = True
self.debug = False
self.ru = 'RU' in AUTH_REALM
self.version = 'v3.08(27.09.2015)'
self.author = 'by spoter'
self.description = 'autoaim_extended'
self.description_ru = 'Мод: "Индикатор'
self.author_ru = 'автор: spoter'
self.name = 'autoaim_extended'
self.description_analytics = 'Мод: "Индикатор'
self.tid = 'UA-57975916-6'
self.sys_mes = {}
self.setup = {'MODIFIER': {'MODIFIER_NONE': 0, 'MODIFIER_SHIFT': 1, 'MODIFIER_CTRL': 2, 'MODIFIER_ALT': 4}}
self._thread_analytics = None
self.analytics_started = False
self.language = None
self.xvm_installed = False
self.xvm_check()
self.res_mods = self.res_mods_init()
self.data = {}
self.default_config()
new_config = self.load_json(self.name, self.data)
self.data = new_config
if 'Русский' in self.data['config'].get('language'): self.ru = True
if self.ru:
self.description = self.description_ru
self.author = self.author_ru
@staticmethod
def res_mods_init():
wd = os.path.dirname(os.path.realpath(__file__))
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
return wd
def xvm_check(self):
try:
import xvm_main
self.xvm_installed = True
except StandardError:
pass
def default_config(self):
self.data = {
'config': {
'enable': True, 'debug': False, 'color': 'wg_enemy', 'indicators': {'model': True, 'direction': True, 'box': True}, 'language': 'Русский'
}, 'language': {
'Русский': {
}, 'English': {
}
}
}
def do_config(self):
self.enable = self.data['config'].get('enable', False)
self.debug = self.data['config'].get('debug', False)
if self.data['config'].get('language') in self.data['language']:
self.language = self.data['language'].get(self.data['config'].get('language'))
else:
self.data['config']['language'] = 'English'
self.language = self.data['language'].get('English')
def byte_ify(self, inputs):
if inputs:
if isinstance(inputs, dict):
return {self.byte_ify(key): self.byte_ify(value) for key, value in inputs.iteritems()}
elif isinstance(inputs, list):
return [self.byte_ify(element) for element in inputs]
elif isinstance(inputs, unicode):
return inputs.encode('utf-8')
else:
return inputs
return inputs
@staticmethod
def json_comments(text):
regex = r'\s*(#|\/{2}).*$'
regex_inline = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\").*\"),?)(?:\s)*(((#|(\/{2})).*)|)$'
lines = text.split('\n')
excluded = []
for index, line in enumerate(lines):
if re.search(regex, line):
if re.search(r'^' + regex, line, re.IGNORECASE):
excluded.append(lines[index])
elif re.search(regex_inline, line):
lines[index] = re.sub(regex_inline, r'\1', line)
for line in excluded:
lines.remove(line)
return '\n'.join(lines)
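# Illustrative behavior (a minimal sketch): whole lines starting with '#' or
# '//' are removed before the text reaches json.loads, e.g.
#
#   json_comments('// header\n{\n"enable": true\n}')  ->  '{\n"enable": true\n}'
#
# Lines with trailing '//' comments are rewritten by the inline regex.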
def load_json(self, name, config_old, save=False):
config_new = config_old
path = './res_mods/configs/spoter_mods/%s/' % self.name
if not os.path.exists(path):
os.makedirs(path)
new_path = '%s%s.json' % (path, name)
if save:
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
else:
if os.path.isfile(new_path):
try:
with codecs.open(new_path, 'r', encoding='utf-8-sig') as json_file:
data = self.json_comments(json_file.read().decode('utf-8-sig'))
config_new = self.byte_ify(json.loads(data))
json_file.close()
except Exception as e:
self.sys_mess()
print '%s%s' % (self.sys_mes['ERROR'], e)
else:
self.sys_mess()
print '%s[%s, %s %s]' % (self.sys_mes['ERROR'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG'])
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG_DONE'])
return config_new
@staticmethod
def code_pa(text):
try:
return text.encode('windows-1251')
except StandardError:
return text
def debugs(self, text):
if self.debug:
try:
text = text.encode('windows-1251')
except StandardError:
pass
print '%s%s [%s]: %s' % (datetime.datetime.now(), self.sys_mes['DEBUG'], self.code_pa(self.description), text)
def analytics_do(self):
if not self.analytics_started:
player = BigWorld.player()
param = urllib.urlencode({
'v': 1, # Version.
'tid': '%s' % self.tid, # Tracking ID / Property ID.
'cid': player.databaseID, # Anonymous Client ID.
't': 'screenview', # Screenview hit type.
'an': '%s' % self.description_analytics, # App name.
'av': '%s %s' % (self.description_analytics, self.version), # App version.
'cd': 'start [%s]' % AUTH_REALM # Screen name / content description.
})
self.debugs('http://www.google-analytics.com/collect?%s' % param)
urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read()
self.analytics_started = True
def analytics(self):
self._thread_analytics = threading.Thread(target=self.analytics_do, name='Thread')
self._thread_analytics.start()
def sys_mess(self):
self.sys_mes = {
'DEBUG': '[DEBUG]', 'LOAD_MOD': self.code_pa('[ЗАГРУЗКА]: ') if self.ru else '[LOAD_MOD]: ', 'INFO': self.code_pa('[ИНФО]: ') if self.ru else '[INFO]: ',
'ERROR': self.code_pa('[ОШИБКА]: ') if self.ru else '[ERROR]: ',
'MSG_RECREATE_CONFIG': self.code_pa('конфиг не найден, создаем заново') if self.ru else 'Config not found, recreating',
'MSG_RECREATE_CONFIG_DONE': self.code_pa('конфиг создан УСПЕШНО') if self.ru else 'Config recreating DONE',
'MSG_INIT': self.code_pa('применение настроек...') if self.ru else 'initialized ...', 'MSG_LANGUAGE_SET': self.code_pa('Выбран язык:') if self.ru else 'Language set to:',
'MSG_DISABLED': self.code_pa('отключен ...') if self.ru else 'disabled ...'
}
def load_mod(self):
self.do_config()
self.sys_mess()
print ''
print '%s[%s, %s]' % (self.sys_mes['LOAD_MOD'], self.code_pa(self.description), self.code_pa(self.author))
if self.enable:
self.debugs('Debug Activated ...')
print '%s[%s %s %s...]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.sys_mes['MSG_LANGUAGE_SET'], self.code_pa(self.data['config'].get('language')))
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_INIT'])
else:
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_DISABLED'])
print ''
class AutoAim(object):
def __init__(self):
self.autoaim_vehicle = None
self.view_edge_callback = None
self.view_direction_callback = None
self._box = None
self._model = None
self._model_blank = None
self._direction = None
self._create_config()
self._load_config()
def _create_config(self):
self.indicatorModel = True
self.indicatorEdge = False
self.indicatorDirection = True
self.indicatorBox = True
self._path = 'objects/autoaim_extended/'
self._box = None
self._box_tex = '%swg_enemy/box.dds' % self._path
self._model = None
self._model_tex = '%swg_enemy/marker.model' % self._path
self._model_blank = None
self._model_blank_text = '%sempty/marker.model' % self._path
self._direction = None
self.angle_autoaim = math.radians(1.28915504)
self._color = 'wg_enemy'
self._autoaim = (0.8588235294, 0.01568627451, 0, 1)
self._enemy = (1, 0, 0, 0.5)
self._friend = (0, 1, 0, 0.5)
self._flag = (1, 1, 1, 1)
self._autoaim_def = (0.2, 0.2, 0.2, 0.5)
self._enemy_def = (1, 0, 0, 0.5)
self._friend_def = (0, 1, 0, 0.5)
self._flag_def = (1, 1, 1, 1)
self._config = {'colors': {}}
self._config['colors']['blue'] = {'rgb': (0, 0, 255), 'edge': (0, 0, 1, 1), 'model': '%sblue/marker.model' % self._path, 'box': '%sblue/box.dds' % self._path}
self._config['colors']['brown'] = {
'rgb': (165, 42, 43), 'edge': (0.6470588235, 0.1647058824, 0.168627451, 1), 'model': '%sbrown/marker.model' % self._path, 'box': '%sbrown/box.dds' % self._path
}
self._config['colors']['chocolate'] = {
'rgb': (211, 105, 30), 'edge': (0.8274509804, 0.4117647059, 0.1176470588, 1), 'model': '%schocolate/marker.model' % self._path, 'box': '%schocolate/box.dds' % self._path
}
self._config['colors']['cornflower_blue'] = {
'rgb': (101, 149, 238), 'edge': (0.3960784314, 0.5843137255, 0.9333333333, 1), 'model': '%scornflower_blue/marker.model' % self._path, 'box': '%scornflower_blue/box.dds' % self._path
}
self._config['colors']['cream'] = {
'rgb': (252, 245, 200), 'edge': (0.9882352941, 0.9607843137, 0.7843137255, 1), 'model': '%scream/marker.model' % self._path, 'box': '%scream/box.dds' % self._path
}
self._config['colors']['cyan'] = {'rgb': (0, 255, 255), 'edge': (0, 1, 1, 1), 'model': '%scyan/marker.model' % self._path, 'box': '%scyan/box.dds' % self._path}
self._config['colors']['emerald'] = {
'rgb': (40, 240, 156), 'edge': (0.1568627451, 0.9411764706, 0.6117647059, 1), 'model': '%semerald/marker.model' % self._path, 'box': '%semerald/box.dds' % self._path
}
self._config['colors']['gold'] = {'rgb': (255, 215, 0), 'edge': (1, 0.8431372549, 0, 1), 'model': '%sgold/marker.model' % self._path, 'box': '%sgold/box.dds' % self._path}
self._config['colors']['green'] = {'rgb': (0, 128, 0), 'edge': (0, 0.5019607843, 0, 1), 'model': '%sgreen/marker.model' % self._path, 'box': '%sgreen/box.dds' % self._path}
self._config['colors']['green_yellow'] = {
'rgb': (173, 255, 46), 'edge': (0.6784313725, 1, 0.1803921569, 1), 'model': '%sgreen_yellow/marker.model' % self._path, 'box': '%sgreen_yellow/box.dds' % self._path
}
self._config['colors']['hot_pink'] = {
'rgb': (255, 105, 181), 'edge': (1, 0.4117647059, 0.7098039216, 1), 'model': '%shot_pink/marker.model' % self._path, 'box': '%shot_pink/box.dds' % self._path
}
self._config['colors']['lime'] = {'rgb': (0, 255, 0), 'edge': (0, 1, 0, 1), 'model': '%slime/marker.model' % self._path, 'box': '%slime/box.dds' % self._path}
self._config['colors']['orange'] = {'rgb': (255, 165, 0), 'edge': (1, 0.6470588235, 0, 1), 'model': '%sorange/marker.model' % self._path, 'box': '%sorange/box.dds' % self._path}
self._config['colors']['pink'] = {'rgb': (255, 192, 203), 'edge': (1, 0.7529411765, 0.7960784314, 1), 'model': '%spink/marker.model' % self._path, 'box': '%spink/box.dds' % self._path}
self._config['colors']['purple'] = {'rgb': (128, 0, 128), 'edge': (0.5019607843, 0, 0.5019607843, 1), 'model': '%spurple/marker.model' % self._path, 'box': '%spurple/box.dds' % self._path}
self._config['colors']['red'] = {'rgb': (255, 0, 0), 'edge': (1, 0, 0, 1), 'model': '%sred/marker.model' % self._path, 'box': '%sred/box.dds' % self._path}
self._config['colors']['wg_blur'] = {
'rgb': (131, 120, 252), 'edge': (0.5137254902, 0.4705882353, 0.9882352941, 1), 'model': '%swg_blur/marker.model' % self._path, 'box': '%swg_blur/box.dds' % self._path
}
self._config['colors']['wg_enemy'] = {
'rgb': (219, 4, 0), 'edge': (0.8588235294, 0.01568627451, 0, 1), 'model': '%swg_enemy/marker.model' % self._path, 'box': '%swg_enemy/box.dds' % self._path
}
self._config['colors']['wg_friend'] = {
'rgb': (128, 214, 57), 'edge': (0.5019607843, 0.8392156863, 0.2235294118, 1), 'model': '%swg_friend/marker.model' % self._path, 'box': '%swg_friend/box.dds' % self._path
}
self._config['colors']['wg_squad'] = {
'rgb': (255, 224, 65), 'edge': (1, 0.8784313725, 0.2549019608, 1), 'model': '%swg_squad/marker.model' % self._path, 'box': '%swg_squad/box.dds' % self._path
}
self._config['colors']['yellow'] = {'rgb': (255, 255, 0), 'edge': (1, 1, 0, 1), 'model': '%syellow/marker.model' % self._path, 'box': '%syellow/box.dds' % self._path}
def _load_config(self):
self._color = config.data['config'].get('color', 'wg_enemy')
config_indicators = config.data['config'].get('indicators')
self.indicatorModel = config_indicators.get('model', True)
self.indicatorDirection = config_indicators.get('direction', True)
self.indicatorBox = config_indicators.get('box', True)
self._box_tex = self._config['colors'][self._color]['box']
self._model_tex = self._config['colors'][self._color]['model']
self._autoaim = self._config['colors'][self._color]['edge']
def find_autoaim_target(self):
# PlayerAvatar is assumed to expose the current auto-aim target as the
# autoAimVehicle property; assigning a bare property() object to a local,
# as the original did, would never evaluate to None.
auto_aim_vehicle = getattr(BigWorld.player(), 'autoAimVehicle', None)
print 'find_autoaim_target', auto_aim_vehicle
if auto_aim_vehicle is None and BigWorld.target() is not None:
return BigWorld.target()
player = BigWorld.player()
vehicles = player.arena.vehicles
camera_dir, camera_pos = cameras.getWorldRayAndPoint(0, 0)
camera_dir.normalise()
result_len = None
las_vehicle = None
min_radian = 100000.0
for vId, vData in vehicles.items():
if vData['team'] == player.team:
continue
vehicle = BigWorld.entity(vId)
if vehicle is None or not vehicle.isStarted or not vehicle.isAlive():
continue
temp1, radian = self._calc_radian(vehicle.position, self.angle_autoaim)  # 1.289 degrees in radians
if not temp1:
continue
length = self._calc_length(vehicle.position, BigWorld.player().position)
if radian:
if result_len is None:
result_len = length
las_vehicle = vehicle
if radian < min_radian and result_len >= length:
min_radian = radian
las_vehicle = vehicle
result = las_vehicle
if result is not None:
if BigWorld.wg_collideSegment(BigWorld.player().spaceID, BigWorld.entity(result.id).appearance.modelsDesc['gun']['model'].position, camera_pos, False) is None:
return result
return BigWorld.target()
@staticmethod
def _calc_length(start_position, end_position):
return (end_position - start_position).length
@staticmethod
def _calc_radian(target_position, angle):
camera_dir, camera_pos = cameras.getWorldRayAndPoint(0, 0)
camera_dir.normalise()
camera_to_target = target_position - camera_pos
a = camera_to_target.dot(camera_dir)
if a < 0:
return False, None
target_radian = camera_to_target.lengthSquared
radian = 1.0 - a * a / target_radian
if radian > angle:
return False, None
return True, radian
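# Geometry note (explanatory): with d the normalised camera direction and
# v = target_position - camera_pos, a = v.dot(d) is the projected distance,
# so 1 - a*a/|v|^2 equals sin(theta)**2 for the angle theta between v and d.
# The value compared against angle_autoaim is therefore sin(theta)**2, not
# theta itself, e.g.:
#
#   import math
#   theta = 0.1
#   assert abs((1 - math.cos(theta) ** 2) - math.sin(theta) ** 2) < 1e-12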
@staticmethod
def get_battle_on():
try:
if BigWorld.player().arena:
return True
except StandardError:
return False
return hasattr(BigWorld.player(), 'arena')
@staticmethod
def get_is_live(vehicle_id):
try:
return BigWorld.player().arena.vehicles[vehicle_id]['isAlive']
except StandardError:
return False
def get_is_friendly(self, vehicle_id):
player = BigWorld.player()
return self.get_battle_on() and player.arena.vehicles[player.playerVehicleID]['team'] == player.arena.vehicles[vehicle_id]['team']
def create_indicators(self):
if self.indicatorBox:
self.create_box()
if self.indicatorDirection:
self.create_direction()
if self.indicatorModel:
self.create_model()
def install_indicators(self):
self.autoaim_vehicle = None
self.create_indicators()
def uninstall_indicators(self):
self.delete_indicators()
self.autoaim_vehicle = None
self.view_edge_callback = None
self.view_direction_callback = None
def view_indicators(self):
if isinstance(self.autoaim_vehicle, Vehicle.Vehicle) and self.autoaim_vehicle.isStarted and self.get_is_live(self.autoaim_vehicle.id) and not self.get_is_friendly(self.autoaim_vehicle.id):
if self.indicatorBox:
self.view_box()
if self.indicatorEdge:
self.view_edge_callback = BigWorld.callback(0.5, self.view_edge)
if self.indicatorModel:
self.view_model()
if self.indicatorDirection:
self.view_direction_callback = BigWorld.callback(0.5, self.view_direction)
else:
self.autoaim_vehicle = None
def hide_indicators(self):
if self.indicatorBox:
self.hide_box()
if self.indicatorEdge:
if self.view_edge_callback:
self.view_edge_callback = None
if self.indicatorModel:
self.hide_model()
if self.indicatorDirection:
if self.view_direction_callback:
self.view_direction_callback = None
self.hide_direction()
self.autoaim_vehicle = None
def create_box(self):
self._box = GUI.BoundingBox(self._box_tex)
self._box.size = (0.01, 0.01)
self._box.visible = False
GUI.addRoot(self._box)
def create_model(self):
if self._model:
self.delete_model()
if self._model_blank:
self.delete_blank_model()
if self.indicatorModel:
self._model = BigWorld.Model(self._model_tex)
self._model.visible = False
elif self.indicatorEdge and not self.indicatorModel:
self._model_blank = BigWorld.Model(self._model_blank_text)
self._model_blank.visible = False
def create_direction(self):
if self.indicatorDirection:
# noinspection PyProtectedMember
self._direction = layout._DirectionIndicator()
self._direction.component.visible = False
self._direction.active(False)
if self._color in ['cream', 'emerald', 'gold', 'green', 'green_yellow', 'lime', 'wg_friend', 'wg_squad', 'yellow']:
self._direction.setShape('green')
elif self._color in ['brown', 'chocolate', 'orange', 'pink', 'red', 'wg_enemy']:
self._direction.setShape('red')
elif self._color in ['blue', 'cornflower_blue', 'cyan', 'hot_pink', 'purple', 'wg_blur']:
self._direction.setShape('purple')
else:
self._direction.setShape('red')
def view_box(self):
if hasattr(self.autoaim_vehicle, 'model') and self._box:
self._box.source = self.autoaim_vehicle.model.bounds
self._box.visible = True
def view_model(self):
if self._model:
if hasattr(self.autoaim_vehicle, 'appearance'):
self.autoaim_vehicle.appearance.modelsDesc['hull']['model'].node('HP_turretJoint').attach(self._model)
self._model.visible = True
if self._model_blank:
if hasattr(self.autoaim_vehicle, 'appearance'):
self.autoaim_vehicle.appearance.modelsDesc['hull']['model'].node('HP_turretJoint').attach(self._model_blank)
self._model_blank.visible = True
def view_edge(self):
if hasattr(self.autoaim_vehicle, 'appearance') and hasattr(self.autoaim_vehicle, 'model') and self.autoaim_vehicle.isAlive():
BigWorld.wgDelEdgeDetectEntity(self.autoaim_vehicle)
if BigWorld.wg_collideSegment(BigWorld.player().spaceID, self.autoaim_vehicle.appearance.modelsDesc['gun']['model'].position, BigWorld.entity(BigWorld.player(
).playerVehicleID).appearance.modelsDesc['gun']['model'].position, False) is None:
BigWorld.wgSetEdgeDetectColors((Math.Vector4(self._autoaim_def), Math.Vector4(self._enemy), Math.Vector4(self._friend), Math.Vector4(self._autoaim)))
BigWorld.wgAddEdgeDetectEntity(self.autoaim_vehicle, 3, 0)
self.view_edge_callback = BigWorld.callback(0.5, self.view_edge)
def view_direction(self):
try:
if self.autoaim_vehicle is not None and self.get_is_live(self.autoaim_vehicle.id):
self._direction.component.visible = True
self._direction.active(True)
matrix = self.autoaim_vehicle.matrix
if matrix:
m = Math.Matrix(matrix)
pos = m.translation
length = (BigWorld.player().position - pos).length
self._direction.setPosition(pos)
self._direction.setDistance(length)
self.view_direction_callback = BigWorld.callback(0.5, self.view_direction)
except StandardError:
self.view_direction_callback = None
def hide_box(self):
if self._box:
self._box.source = None
self._box.visible = False
def hide_model(self):
if self._model and self._model.visible:
self._model.visible = False
if hasattr(self.autoaim_vehicle, 'appearance'):
turret_position = self.autoaim_vehicle.appearance.modelsDesc['hull']['model'].node('HP_turretJoint')
if turret_position.attachments.length != 0:
turret_position.detach(self._model)
if self._model_blank and self._model_blank.visible:
self._model_blank.visible = False
if hasattr(self.autoaim_vehicle, 'appearance'):
turret_position = self.autoaim_vehicle.appearance.modelsDesc['hull']['model'].node('HP_turretJoint')
if turret_position.attachments.length != 0:
turret_position.detach(self._model_blank)
self.create_model()
def hide_edge(self):
if hasattr(self.autoaim_vehicle, 'appearance'):
BigWorld.wgDelEdgeDetectEntity(self.autoaim_vehicle)
BigWorld.wgSetEdgeDetectColors((Math.Vector4(self._autoaim_def), Math.Vector4(self._enemy_def), Math.Vector4(self._friend_def), Math.Vector4(self._flag_def)))
def hide_direction(self):
if self._direction:
self._direction.component.visible = False
self._direction.active(False)
def delete_indicators(self):
self.delete_direction()
self.delete_box()
self.delete_model()
def delete_direction(self):
if self._direction:
self._direction = None
def delete_box(self):
if self._box:
GUI.delRoot(self._box)
self._box = None
def delete_model(self):
self._model = None
def delete_blank_model(self):
self._model_blank = None
def start_battle(self):
BigWorld.player().arena.onVehicleKilled += self.injected_on_vehicle_killed
self.install_indicators()
def stop_battle(self):
BigWorld.player().arena.onVehicleKilled -= self.injected_on_vehicle_killed
self.hide_indicators()
self.uninstall_indicators()
def injected_on_vehicle_killed(self, target_id, attacker_id, equipment_id, reason):
_, _, _ = attacker_id, reason, equipment_id
if self.autoaim_vehicle and target_id == self.autoaim_vehicle.id:
self.hide_indicators()
if target_id == BigWorld.player().playerVehicleID:
self.hide_indicators()
# hooked function wrappers:
def hook_update_all(self):
hooked_UpdateAll(self)
config.analytics()
def hook_auto_aim(self, target):
if config.enable and not self.autoaim_extended.use:
old_vehicle = autoaim_extended.autoaim_vehicle
if autoaim_extended.autoaim_vehicle:
autoaim_extended.hide_indicators()
if old_vehicle != target:
autoaim_extended.autoaim_vehicle = target
autoaim_extended.view_indicators()
return hooked_autoAim(self, target)
def hook_on_auto_aim_vehicle_lost(self):
if config.enable:
if autoaim_extended.autoaim_vehicle:
autoaim_extended.hide_indicators()
return hooked_onAutoAimVehicleLost(self)
def hook_on_lock_target(self, lock):
if config.enable:
player = BigWorld.player()
if not isPlayerAvatar():
return
if self.isPlaying:
if lock == 1:
player.autoAim(autoaim_extended.find_autoaim_target())
elif lock == 0:
player.autoAim(None)
else:
player.autoAim(None)
elif self.isRecording:
self._BattleReplay__replayCtrl.onLockTarget(lock)
else:
hooked_onLockTarget(self, lock)
def hook_start_battle(self):
hooked_start_battle(self)
if config.enable:
autoaim_extended.start_battle()
def hook_stop_battle(self):
hooked_stop_battle(self)
if config.enable:
autoaim_extended.stop_battle()
class Autoaim_extended():
def __init__(self):
self.use = False
self.target = autoaim_extended.autoaim_vehicle
def start(self, target):
if not self.use:
self.use = True
if config.enable:
old_vehicle = autoaim_extended.autoaim_vehicle
if autoaim_extended.autoaim_vehicle:
autoaim_extended.hide_indicators()
if old_vehicle != target:
autoaim_extended.autoaim_vehicle = target
autoaim_extended.view_indicators()
return True
return
def stop(self):
if self.use:
self.use = False
if config.enable:
if autoaim_extended.autoaim_vehicle:
autoaim_extended.hide_indicators()
return True
return
#hooked
# noinspection PyProtectedMember
hooked_UpdateAll = Hangar._Hangar__updateAll
hooked_autoAim = PlayerAvatar.autoAim
hooked_onAutoAimVehicleLost = PlayerAvatar.onAutoAimVehicleLost
hooked_onLockTarget = BattleReplay.onLockTarget
hooked_start_battle = Battle.afterCreate
hooked_stop_battle = Battle.beforeDelete
#hook
Hangar._Hangar__updateAll = hook_update_all
PlayerAvatar.autoAim = hook_auto_aim
PlayerAvatar.onAutoAimVehicleLost = hook_on_auto_aim_vehicle_lost
BattleReplay.onLockTarget = hook_on_lock_target
Battle.afterCreate = hook_start_battle
Battle.beforeDelete = hook_stop_battle
#start mod
config = Config()
config.load_mod()
autoaim_extended = AutoAim()
PlayerAvatar.autoaim_extended = Autoaim_extended() |
can_replay.py | #!/usr/bin/env python3
import os
import time
import threading
from tqdm import tqdm
os.environ['FILEREADER_CACHE'] = '1'
from common.basedir import BASEDIR
from common.realtime import config_realtime_process, Ratekeeper, DT_CTRL
from selfdrive.boardd.boardd import can_capnp_to_can_list
from tools.lib.logreader import LogReader
from panda import Panda
try:
from panda_jungle import PandaJungle # pylint: disable=import-error
except Exception:
PandaJungle = None # type: ignore
def send_thread(s, flock):
if "Jungle" in str(type(s)):
if "FLASH" in os.environ:
with flock:
s.flash()
for i in [0, 1, 2, 3, 0xFFFF]:
s.can_clear(i)
s.set_ignition(False)
time.sleep(5)
s.set_ignition(True)
s.set_panda_power(True)
else:
s.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
s.set_can_loopback(False)
idx = 0
ign = True
rk = Ratekeeper(1 / DT_CTRL, print_delay_threshold=None)
while True:
# handle ignition cycling
if ENABLE_IGN:
i = (rk.frame*DT_CTRL) % (IGN_ON + IGN_OFF) < IGN_ON
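# e.g. ON=5 and OFF=5 give a 10 s window; frames falling in the first 5 s of each window yield ignition on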
if i != ign:
ign = i
s.set_ignition(ign)
snd = CAN_MSGS[idx]
snd = list(filter(lambda x: x[-1] <= 2, snd))
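# the last element of each converted CAN message is the source bus; keep only buses 0-2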
s.can_send_many(snd)
idx = (idx + 1) % len(CAN_MSGS)
# Drain panda message buffer
s.can_recv()
rk.keep_time()
def connect():
config_realtime_process(3, 55)
serials = {}
flashing_lock = threading.Lock()
while True:
# look for new devices
for p in [Panda, PandaJungle]:
if p is None:
continue
for s in p.list():
if s not in serials:
print("starting send thread for", s)
serials[s] = threading.Thread(target=send_thread, args=(p(s), flashing_lock))
serials[s].start()
# try to join all send threads
cur_serials = serials.copy()
for s, t in cur_serials.items():
t.join(0.01)
if not t.is_alive():
del serials[s]
time.sleep(1)
if __name__ == "__main__":
if PandaJungle is None:
print("\33[31m", "WARNING: cannot connect to jungles. Clone the jungle library to enable support:", "\033[0m")
print("\033[34m", f"cd {BASEDIR} && git clone https://github.com/commaai/panda_jungle", "\033[0m")
print("Loading log...")
ROUTE = "77611a1fac303767/2020-03-24--09-50-38"
REPLAY_SEGS = list(range(10, 16)) # route has 82 segments available
CAN_MSGS = []
for i in tqdm(REPLAY_SEGS):
log_url = f"https://commadataci.blob.core.windows.net/openpilotci/{ROUTE}/{i}/rlog.bz2"
lr = LogReader(log_url)
CAN_MSGS += [can_capnp_to_can_list(m.can) for m in lr if m.which() == 'can']
# set both to cycle ignition
IGN_ON = int(os.getenv("ON", "0"))
IGN_OFF = int(os.getenv("OFF", "0"))
ENABLE_IGN = IGN_ON > 0 and IGN_OFF > 0
if ENABLE_IGN:
print(f"Cycling ignition: on for {IGN_ON}s, off for {IGN_OFF}s")
connect()
|
IntegrationTests.py | import multiprocessing
import sys
import time
import unittest
import percy
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
TIMEOUT = 20
class IntegrationTests(unittest.TestCase):
def percy_snapshot(cls, name=""):
snapshot_name = "{} - py{}.{}".format(
name, sys.version_info.major, sys.version_info.minor
)
print(snapshot_name)
cls.percy_runner.snapshot(name=snapshot_name)
def wait_for_element_by_css_selector(self, selector):
return WebDriverWait(self.driver, TIMEOUT).until(
EC.presence_of_element_located((By.CSS_SELECTOR, selector))
)
def wait_for_text_to_equal(self, selector, assertion_text):
return WebDriverWait(self.driver, TIMEOUT).until(
EC.text_to_be_present_in_element(
(By.CSS_SELECTOR, selector), assertion_text
)
)
@classmethod
def setUpClass(cls):
super(IntegrationTests, cls).setUpClass()
cls.driver = webdriver.Chrome()
loader = percy.ResourceLoader(
webdriver=cls.driver, base_url="/assets", root_dir="tests/assets"
)
cls.percy_runner = percy.Runner(loader=loader)
cls.percy_runner.initialize_build()
@classmethod
def tearDownClass(cls):
super(IntegrationTests, cls).tearDownClass()
cls.driver.quit()
cls.percy_runner.finalize_build()
def setUp(s):
pass
def tearDown(s):
if hasattr(s, "server_process"):
time.sleep(2)
s.server_process.terminate()
time.sleep(2)
def startServer(s, dash, port=8050):
def run():
dash.scripts.config.serve_locally = True
dash.run_server(port=port, debug=False, processes=4, threaded=False)
# Run on a separate process so that it doesn't block
s.server_process = multiprocessing.Process(target=run)
s.server_process.start()
time.sleep(0.5)
# Visit the dash page
s.driver.get("http://localhost:{}".format(port))
time.sleep(0.5)
# Inject an error and warning logger
logger = """
window.tests = {};
window.tests.console = {error: [], warn: [], log: []};
var _log = console.log;
var _warn = console.warn;
var _error = console.error;
console.log = function() {
window.tests.console.log.push({method: 'log', arguments: arguments});
return _log.apply(console, arguments);
};
console.warn = function() {
window.tests.console.warn.push({method: 'warn', arguments: arguments});
return _warn.apply(console, arguments);
};
console.error = function() {
window.tests.console.error.push({method: 'error', arguments: arguments});
return _error.apply(console, arguments);
};
"""
s.driver.execute_script(logger)
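# A typical test built on the helpers above might look like this (sketch only;
# `my_dash_app` is a hypothetical dash.Dash instance defined elsewhere):
# def test_renders_header(self):
#     self.startServer(my_dash_app)
#     self.wait_for_text_to_equal("#header", "Hello Dash")
#     self.percy_snapshot("header")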
|
k8s.py | from __future__ import print_function, division, unicode_literals
import base64
import functools
import json
import logging
import os
import re
import subprocess
import tempfile
from copy import deepcopy
from pathlib import Path
from threading import Thread
from time import sleep
from typing import Text, List
import yaml
from clearml_agent.commands.events import Events
from clearml_agent.commands.worker import Worker
from clearml_agent.definitions import ENV_DOCKER_IMAGE
from clearml_agent.errors import APIError
from clearml_agent.helper.base import safe_remove_file
from clearml_agent.helper.dicts import merge_dicts
from clearml_agent.helper.process import get_bash_output
from clearml_agent.helper.resource_monitor import ResourceMonitor
from clearml_agent.interface.base import ObjectID
class K8sIntegration(Worker):
K8S_PENDING_QUEUE = "k8s_scheduler"
K8S_DEFAULT_NAMESPACE = "clearml"
KUBECTL_APPLY_CMD = "kubectl apply -f"
KUBECTL_RUN_CMD = "kubectl run clearml-{queue_name}-id-{task_id} " \
"--image {docker_image} " \
"--restart=Never --replicas=1 " \
"--generator=run-pod/v1 " \
"--namespace={namespace}"
KUBECTL_DELETE_CMD = "kubectl delete pods " \
"--selector=TRAINS=agent " \
"--field-selector=status.phase!=Pending,status.phase!=Running " \
"--namespace={namespace}"
BASH_INSTALL_SSH_CMD = [
"apt-get install -y openssh-server",
"mkdir -p /var/run/sshd",
"echo 'root:training' | chpasswd",
"echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config",
"sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config",
r"sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd",
"echo 'AcceptEnv TRAINS_API_ACCESS_KEY TRAINS_API_SECRET_KEY CLEARML_API_ACCESS_KEY CLEARML_API_SECRET_KEY' "
">> /etc/ssh/sshd_config",
'echo "export VISIBLE=now" >> /etc/profile',
'echo "export PATH=$PATH" >> /etc/profile',
'echo "ldconfig" >> /etc/profile',
"/usr/sbin/sshd -p {port}"]
CONTAINER_BASH_SCRIPT = [
"export DEBIAN_FRONTEND='noninteractive'",
"echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/docker-clean",
"chown -R root /root/.cache/pip",
"apt-get update",
"apt-get install -y git libsm6 libxext6 libxrender-dev libglib2.0-0",
"declare LOCAL_PYTHON",
"for i in {{10..5}}; do which python3.$i && python3.$i -m pip --version && "
"export LOCAL_PYTHON=$(which python3.$i) && break ; done",
"[ ! -z $LOCAL_PYTHON ] || apt-get install -y python3-pip",
"[ ! -z $LOCAL_PYTHON ] || export LOCAL_PYTHON=python3",
"$LOCAL_PYTHON -m pip install clearml-agent",
"{extra_bash_init_cmd}",
"$LOCAL_PYTHON -m clearml_agent execute --full-monitoring --require-queue --id {task_id}"
]
AGENT_LABEL = "TRAINS=agent"
LIMIT_POD_LABEL = "ai.allegro.agent.serial=pod-{pod_number}"
_edit_hyperparams_version = "2.9"
def __init__(
self,
k8s_pending_queue_name=None,
kubectl_cmd=None,
container_bash_script=None,
debug=False,
ports_mode=False,
num_of_services=20,
base_pod_num=1,
user_props_cb=None,
overrides_yaml=None,
template_yaml=None,
clearml_conf_file=None,
extra_bash_init_script=None,
namespace=None,
**kwargs
):
"""
Initialize the k8s integration glue layer daemon
:param str k8s_pending_queue_name: queue name to use when task is pending in the k8s scheduler
:param str|callable kubectl_cmd: kubectl command line str, supports formatting (default: KUBECTL_RUN_CMD)
example: "task={task_id} image={docker_image} queue_id={queue_id}"
or a callable function: kubectl_cmd(task_id, docker_image, queue_id, task_data)
:param str container_bash_script: container bash script to be executed in k8s (default: CONTAINER_BASH_SCRIPT)
Note: this string is formatted with format(); literal curly brackets must be doubled ({ -> {{).
Format arguments passed: {task_id} and {extra_bash_init_cmd}
:param bool debug: Switch logging on
:param bool ports_mode: Adds a label to each pod which can be used in services in order to expose ports.
Requires the `num_of_services` parameter.
:param int num_of_services: Number of k8s services configured in the cluster. Required if `ports_mode` is True.
(default: 20)
:param int base_pod_num: Used when `ports_mode` is True, sets the base pod number to a given value (default: 1)
:param callable user_props_cb: An Optional callable allowing additional user properties to be specified
when scheduling a task to run in a pod. Callable can receive an optional pod number and should return
a dictionary of user properties (name and value). Signature is [[Optional[int]], Dict[str,str]]
:param str overrides_yaml: YAML file containing the overrides for the pod (optional)
:param str template_yaml: YAML file containing the template for the pod (optional).
If provided the pod is scheduled with kubectl apply and overrides are ignored, otherwise with kubectl run.
:param str clearml_conf_file: clearml.conf file to be used by the pod itself (optional)
:param str extra_bash_init_script: Additional bash script to run before starting the Task inside the container
:param str namespace: K8S namespace to be used when creating the new pods (default: clearml)
"""
super(K8sIntegration, self).__init__()
self.k8s_pending_queue_name = k8s_pending_queue_name or self.K8S_PENDING_QUEUE
self.kubectl_cmd = kubectl_cmd or self.KUBECTL_RUN_CMD
self.container_bash_script = container_bash_script or self.CONTAINER_BASH_SCRIPT
# Always use system packages, because we will be running inside a docker container
self._session.config.put("agent.package_manager.system_site_packages", True)
# Add debug logging
if debug:
self.log.logger.disabled = False
self.log.logger.setLevel(logging.INFO)
self.ports_mode = ports_mode
self.num_of_services = num_of_services
self.base_pod_num = base_pod_num
self._edit_hyperparams_support = None
self._user_props_cb = user_props_cb
self.conf_file_content = None
self.overrides_json_string = None
self.template_dict = None
self.extra_bash_init_script = extra_bash_init_script or None
if self.extra_bash_init_script and not isinstance(self.extra_bash_init_script, str):
self.extra_bash_init_script = ' ; '.join(self.extra_bash_init_script) # noqa
self.namespace = namespace or self.K8S_DEFAULT_NAMESPACE
self.pod_limits = []
self.pod_requests = []
if overrides_yaml:
with open(os.path.expandvars(os.path.expanduser(str(overrides_yaml))), 'rt') as f:
overrides = yaml.load(f, Loader=getattr(yaml, 'FullLoader', None))
if overrides:
containers = overrides.get('spec', {}).get('containers', [])
for c in containers:
resources = {str(k).lower(): v for k, v in c.get('resources', {}).items()}
if not resources:
continue
if resources.get('limits'):
self.pod_limits += ['{}={}'.format(k, v) for k, v in resources['limits'].items()]
if resources.get('requests'):
self.pod_requests += ['{}={}'.format(k, v) for k, v in resources['requests'].items()]
# remove double entries
self.pod_limits = list(set(self.pod_limits))
self.pod_requests = list(set(self.pod_requests))
if self.pod_limits or self.pod_requests:
self.log.warning('Found pod container requests={} limits={}'.format(
self.pod_limits, self.pod_requests))
if containers:
self.log.warning('Removing containers section: {}'.format(overrides['spec'].pop('containers')))
self.overrides_json_string = json.dumps(overrides)
if template_yaml:
with open(os.path.expandvars(os.path.expanduser(str(template_yaml))), 'rt') as f:
self.template_dict = yaml.load(f, Loader=getattr(yaml, 'FullLoader', None))
clearml_conf_file = clearml_conf_file or kwargs.get('trains_conf_file')
if clearml_conf_file:
with open(os.path.expandvars(os.path.expanduser(str(clearml_conf_file))), 'rt') as f:
self.conf_file_content = f.read()
# make sure we use system packages!
self.conf_file_content += '\nagent.package_manager.system_site_packages=true\n'
self._monitor_hanging_pods()
def _monitor_hanging_pods(self):
_check_pod_thread = Thread(target=self._monitor_hanging_pods_daemon)
_check_pod_thread.daemon = True
_check_pod_thread.start()
def _monitor_hanging_pods_daemon(self):
while True:
output = get_bash_output('kubectl get pods -n {namespace} -o=JSON'.format(
namespace=self.namespace
))
output = '' if not output else output if isinstance(output, str) else output.decode('utf-8')
try:
output_config = json.loads(output)
except Exception as ex:
self.log.warning('K8S Glue pods monitor: Failed parsing kubectl output:\n{}\nEx: {}'.format(output, ex))
sleep(self._polling_interval)
continue
pods = output_config.get('items', [])
for pod in pods:
try:
reason = functools.reduce(
lambda a, b: a[b], ('status', 'containerStatuses', 0, 'state', 'waiting', 'reason'), pod
)
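# i.e. pod['status']['containerStatuses'][0]['state']['waiting']['reason']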
except (IndexError, KeyError):
continue
if reason == 'ImagePullBackOff':
pod_name = pod.get('metadata', {}).get('name', None)
if pod_name:
task_id = pod_name.rpartition('-')[-1]
delete_pod_cmd = 'kubectl delete pods {} -n {}'.format(pod_name, self.namespace)
get_bash_output(delete_pod_cmd)
try:
self._session.api_client.tasks.failed(
task=task_id,
status_reason="K8S glue error due to ImagePullBackOff",
status_message="Changed by K8S glue",
force=True
)
except Exception as ex:
self.log.warning(
'K8S Glue pods monitor: Failed deleting task "{}"\nEX: {}'.format(task_id, ex)
)
sleep(self._polling_interval)
def _set_task_user_properties(self, task_id: str, **properties: str):
if self._edit_hyperparams_support is not True:
# either not supported or never tested
if self._edit_hyperparams_support == self._session.api_version:
# tested against latest api_version, not supported
return
if not self._session.check_min_api_version(self._edit_hyperparams_version):
# not supported due to insufficient api_version
self._edit_hyperparams_support = self._session.api_version
return
try:
self._session.get(
service="tasks",
action="edit_hyper_params",
task=task_id,
hyperparams=[
{
"section": "properties",
"name": k,
"value": str(v),
}
for k, v in properties.items()
],
)
# definitely supported
self._edit_hyperparams_support = True
except APIError as error:
if error.code == 404:
self._edit_hyperparams_support = self._session.api_version
def run_one_task(self, queue: Text, task_id: Text, worker_args=None, **_):
print('Pulling task {} launching on kubernetes cluster'.format(task_id))
task_data = self._session.api_client.tasks.get_all(id=[task_id])[0]
# push task into the k8s queue, so we have visibility on pending tasks in the k8s scheduler
try:
print('Pushing task {} into temporary pending queue'.format(task_id))
self._session.api_client.tasks.reset(task_id)
self._session.api_client.tasks.enqueue(task_id, queue=self.k8s_pending_queue_name,
status_reason='k8s pending scheduler')
except Exception as e:
self.log.error("ERROR: Could not push back task [{}] to k8s pending queue [{}], error: {}".format(
task_id, self.k8s_pending_queue_name, e))
return
if task_data.execution.docker_cmd:
docker_parts = task_data.execution.docker_cmd
else:
docker_parts = str(ENV_DOCKER_IMAGE.get() or
self._session.config.get("agent.default_docker.image", "nvidia/cuda"))
# take the first part, this is the docker image name (not arguments)
docker_parts = docker_parts.split()
docker_image = docker_parts[0]
docker_args = docker_parts[1:] if len(docker_parts) > 1 else []
# get the clearml.conf encoded file
# noinspection PyProtectedMember
hocon_config_encoded = (
self.conf_file_content
or Path(self._session._config_file).read_text()
).encode("ascii")
create_clearml_conf = "echo '{}' | base64 --decode >> ~/clearml.conf".format(
base64.b64encode(
hocon_config_encoded
).decode('ascii')
)
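# the configuration is shipped base64-encoded and decoded back into ~/clearml.conf by this shell snippet inside the pod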
if self.ports_mode:
print("Kubernetes looking for available pod to use")
# noinspection PyBroadException
try:
queue_name = self._session.api_client.queues.get_by_id(queue=queue).name
except Exception:
queue_name = 'k8s'
# conform queue name to k8s standards
safe_queue_name = queue_name.lower().strip()
safe_queue_name = re.sub(r'\W+', '', safe_queue_name).replace('_', '').replace('-', '')
# Search for a free pod number
pod_count = 0
pod_number = self.base_pod_num
while self.ports_mode:
pod_number = self.base_pod_num + pod_count
kubectl_cmd_new = "kubectl get pods -l {pod_label},{agent_label} -n {namespace}".format(
pod_label=self.LIMIT_POD_LABEL.format(pod_number=pod_number),
agent_label=self.AGENT_LABEL,
namespace=self.namespace,
)
process = subprocess.Popen(kubectl_cmd_new.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
output = '' if not output else output if isinstance(output, str) else output.decode('utf-8')
error = '' if not error else error if isinstance(error, str) else error.decode('utf-8')
if not output:
# No such pod exists, so we can use the pod_number we found
break
if pod_count >= self.num_of_services - 1:
# All pod numbers are taken, exit
self.log.warning(
"kubectl last result: {}\n{}\nAll k8s services are in use, task '{}' "
"will be enqueued back to queue '{}'".format(
error, output, task_id, queue
)
)
self._session.api_client.tasks.reset(task_id)
self._session.api_client.tasks.enqueue(
task_id, queue=queue, status_reason='k8s max pod limit (no free k8s service)')
return
pod_count += 1
labels = ([self.LIMIT_POD_LABEL.format(pod_number=pod_number)] if self.ports_mode else []) + [self.AGENT_LABEL]
if self.ports_mode:
print("Kubernetes scheduling task id={} on pod={} (pod_count={})".format(task_id, pod_number, pod_count))
else:
print("Kubernetes scheduling task id={}".format(task_id))
if self.template_dict:
output, error = self._kubectl_apply(
create_clearml_conf=create_clearml_conf,
labels=labels, docker_image=docker_image, docker_args=docker_args,
task_id=task_id, queue=queue, queue_name=safe_queue_name)
else:
output, error = self._kubectl_run(
create_clearml_conf=create_clearml_conf,
labels=labels, docker_image=docker_image,
task_data=task_data,
task_id=task_id, queue=queue, queue_name=safe_queue_name)
error = '' if not error else (error if isinstance(error, str) else error.decode('utf-8'))
output = '' if not output else (output if isinstance(output, str) else output.decode('utf-8'))
print('kubectl output:\n{}\n{}'.format(error, output))
if error:
send_log = "Running kubectl encountered an error: {}".format(error)
self.log.error(send_log)
self.send_logs(task_id, send_log.splitlines())
user_props = {"k8s-queue": str(queue_name)}
if self.ports_mode:
user_props.update(
{
"k8s-pod-number": pod_number,
"k8s-pod-label": labels[0],
"k8s-internal-pod-count": pod_count,
}
)
if self._user_props_cb:
# noinspection PyBroadException
try:
custom_props = self._user_props_cb(pod_number) if self.ports_mode else self._user_props_cb()
user_props.update(custom_props)
except Exception:
pass
if user_props:
self._set_task_user_properties(
task_id=task_id,
**user_props
)
def _parse_docker_args(self, docker_args):
# type: (list) -> dict
kube_args = {'env': []}
while docker_args:
cmd = docker_args.pop(0).strip()
if cmd in ('-e', '--env',):
env = docker_args.pop(0).strip()
key, value = env.split('=', 1)
# k8s container env entries are {'name': ..., 'value': ...} dicts
kube_args['env'] += [{'name': key, 'value': value}]
else:
self.log.warning('skipping docker argument {} (only -e --env supported)'.format(cmd))
return kube_args
def _kubectl_apply(self, create_clearml_conf, docker_image, docker_args, labels, queue, task_id, queue_name):
template = deepcopy(self.template_dict)
template.setdefault('apiVersion', 'v1')
template['kind'] = 'Pod'
template.setdefault('metadata', {})
name = 'clearml-{queue}-id-{task_id}'.format(queue=queue_name, task_id=task_id)
template['metadata']['name'] = name
template.setdefault('spec', {})
template['spec'].setdefault('containers', [])
if labels:
labels_dict = dict(pair.split('=', 1) for pair in labels)
template['metadata'].setdefault('labels', {})
template['metadata']['labels'].update(labels_dict)
container = self._parse_docker_args(docker_args)
container_bash_script = [self.container_bash_script] if isinstance(self.container_bash_script, str) \
else self.container_bash_script
script_encoded = '\n'.join(
['#!/bin/bash', ] +
[line.format(extra_bash_init_cmd=self.extra_bash_init_script or '', task_id=task_id)
for line in container_bash_script])
create_init_script = \
"echo '{}' | base64 --decode >> ~/__start_agent__.sh ; " \
"/bin/bash ~/__start_agent__.sh".format(
base64.b64encode(
script_encoded.encode('ascii')
).decode('ascii'))
container = merge_dicts(
container,
dict(name=name, image=docker_image,
command=['/bin/bash'],
args=['-c', '{} ; {}'.format(create_clearml_conf, create_init_script)])
)
if template['spec']['containers']:
template['spec']['containers'][0] = merge_dicts(template['spec']['containers'][0], container)
else:
template['spec']['containers'].append(container)
fp, yaml_file = tempfile.mkstemp(prefix='clearml_k8stmpl_', suffix='.yml')
os.close(fp)
with open(yaml_file, 'wt') as f:
yaml.dump(template, f)
kubectl_cmd = self.KUBECTL_APPLY_CMD.format(
task_id=task_id,
docker_image=docker_image,
queue_id=queue,
namespace=self.namespace
)
# make sure we provide a list
if isinstance(kubectl_cmd, str):
kubectl_cmd = kubectl_cmd.split()
# add the template file at the end
kubectl_cmd += [yaml_file]
try:
process = subprocess.Popen(kubectl_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
except Exception as ex:
return None, str(ex)
finally:
safe_remove_file(yaml_file)
return output, error
def _kubectl_run(self, create_clearml_conf, docker_image, labels, queue, task_data, task_id, queue_name):
if callable(self.kubectl_cmd):
kubectl_cmd = self.kubectl_cmd(task_id, docker_image, queue, task_data, queue_name)
else:
kubectl_cmd = self.kubectl_cmd.format(
queue_name=queue_name,
task_id=task_id,
docker_image=docker_image,
queue_id=queue,
namespace=self.namespace,
)
# make sure we provide a list
if isinstance(kubectl_cmd, str):
kubectl_cmd = kubectl_cmd.split()
if self.overrides_json_string:
kubectl_cmd += ['--overrides=' + self.overrides_json_string]
if self.pod_limits:
kubectl_cmd += ['--limits', ",".join(self.pod_limits)]
if self.pod_requests:
kubectl_cmd += ['--requests', ",".join(self.pod_requests)]
container_bash_script = [self.container_bash_script] if isinstance(self.container_bash_script, str) \
else self.container_bash_script
container_bash_script = ' ; '.join(container_bash_script)
kubectl_cmd += [
"--labels=" + ",".join(labels),
"--command",
"--",
"/bin/sh",
"-c",
"{} ; {}".format(create_clearml_conf, container_bash_script.format(
extra_bash_init_cmd=self.extra_bash_init_script, task_id=task_id)),
]
process = subprocess.Popen(kubectl_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
return output, error
def run_tasks_loop(self, queues: List[Text], worker_params, **kwargs):
"""
:summary: Pull and run tasks from queues.
:description: 1. Go through ``queues`` by order.
2. Try getting the next task for each and run the first one that returns.
3. Go to step 1
:param queues: IDs of queues to pull tasks from
:type queues: list of ``Text``
:param worker_params: Worker command line arguments
:type worker_params: ``clearml_agent.helper.process.WorkerParams``
"""
events_service = self.get_service(Events)
# make sure we have a k8s pending queue
# noinspection PyBroadException
try:
self._session.api_client.queues.create(self.k8s_pending_queue_name)
except Exception:
pass
# get queue id
self.k8s_pending_queue_name = self._resolve_name(self.k8s_pending_queue_name, "queues")
_last_machine_update_ts = 0
while True:
# iterate over queues (priority style, queues[0] is highest)
for queue in queues:
# delete old completed / failed pods
get_bash_output(self.KUBECTL_DELETE_CMD.format(namespace=self.namespace))
# get next task in queue
try:
response = self._session.api_client.queues.get_next_task(queue=queue)
except Exception as e:
print("Warning: Could not access task queue [{}], error: {}".format(queue, e))
continue
else:
try:
task_id = response.entry.task
except AttributeError:
print("No tasks in queue {}".format(queue))
continue
events_service.send_log_events(
self.worker_id,
task_id=task_id,
lines="task {} pulled from {} by worker {}".format(
task_id, queue, self.worker_id
),
level="INFO",
)
self.report_monitor(ResourceMonitor.StatusReport(queues=queues, queue=queue, task=task_id))
self.run_one_task(queue, task_id, worker_params)
self.report_monitor(ResourceMonitor.StatusReport(queues=self.queues))
break
else:
# sleep and retry polling
print("No tasks in Queues, sleeping for {:.1f} seconds".format(self._polling_interval))
sleep(self._polling_interval)
if self._session.config["agent.reload_config"]:
self.reload_config()
def k8s_daemon(self, queue):
"""
Start the k8s Glue service.
This service will be pulling tasks from *queue* and scheduling them for execution using kubectl.
Notice all scheduled tasks are pushed back into K8S_PENDING_QUEUE,
and popped when execution actually starts. This creates full visibility into the k8s scheduler.
Manually popping a task from the K8S_PENDING_QUEUE
will cause the k8s glue to skip that task when its scheduled execution starts.
:param str queue: queue name to pull from
"""
return self.daemon(queues=[ObjectID(name=queue)] if queue else None,
log_level=logging.INFO, foreground=True, docker=False)
@classmethod
def get_ssh_server_bash(cls, ssh_port_number):
return ' ; '.join(line.format(port=ssh_port_number) for line in cls.BASH_INSTALL_SSH_CMD)
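# --- usage sketch (illustrative only, not part of the original agent code) ---
# A minimal way to start the glue layer defined above, assuming a valid clearml.conf
# and an optional pod template exist at the hypothetical paths below; parameter
# meanings follow the __init__ docstring.
# k8s_glue = K8sIntegration(
#     ports_mode=True,                      # label pods so cluster services can expose ports
#     num_of_services=20,                   # must match the services configured in the cluster
#     template_yaml='~/pod_template.yaml',  # hypothetical path
#     clearml_conf_file='~/clearml.conf',   # hypothetical path
#     namespace='clearml',
# )
# k8s_glue.k8s_daemon('default')            # pull tasks from the 'default' queue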
|
server_setup.py | from socket import socket,AF_INET,SOCK_STREAM
from threading import Thread
from tkinter import END
class ServerSocket:
def __init__(self,receive_block_tk):
self.socket=None
self.connection_status=False
self.receive_block=receive_block_tk
self.thread=None
def initiate_connection(self,port):
port=int(port)
server_socket=socket(AF_INET,SOCK_STREAM)
server_socket.bind(('localhost',port))
server_socket.listen(1)
print('Server is ready')
self.socket,_=server_socket.accept() #return a tuple with connection socket and address
self.thread=Thread(target=self.receive_handler,args=(self.socket,self.receive_block))
self.thread.start()
def receive_handler(self,conn,receive_block):
self.connection_status=True
while self.connection_status:
try:
receive_message=conn.recv(1024)
except OSError:
break
receive_block.delete('1.0',END)
receive_block.insert(END,receive_message.decode())
def break_connection_status(self):
self.connection_status=False
self.socket.close()
def send_message(self,send_text):
self.socket.send(send_text.encode())
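# --- usage sketch (illustrative only; assumes a Tk Text widget as the receive box) ---
# import tkinter as tk
# root = tk.Tk()
# receive_box = tk.Text(root)
# server = ServerSocket(receive_box)
# server.initiate_connection(5000)   # blocks until a client connects, then spawns the receive thread
# server.send_message("hello")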
|
main.py | from kivymd.app import MDApp
from kivymd.uix.screen import MDScreen
from kivy.clock import mainthread
from kivy.properties import NumericProperty
from android.permissions import Permission, request_permissions
import sync
import threading
import math
import time
# thread constants
DOWNLOAD_THREAD_COUNT = 6
THREAD_SPAWN_DELAY = 0.2
# main root widget
class MainScreen(MDScreen):
# counters for status updates
download_counter = 0
download_max = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.playlist.text = sync.load_playlists()
@mainthread
def set_in_progress(self, disabled):
# lock the ui according to the disabled parameter
self.playlist.disabled = disabled
self.exit_button.disabled = disabled
self.button.disabled = disabled
self.spinner.active = disabled
@mainthread
def set_status_text(self, text):
# set the status of the downloading
self.status.text = text
@mainthread
def status_counter_increase(self):
self.download_counter += 1
self.status.text = "Downloaded " + str(self.download_counter) + " of " + str(self.download_max)
@mainthread
def status_counter_reset(self, max_count):
self.download_counter = 0
self.download_max = max_count
self.status.text = "Downloaded " + str(self.download_counter) + " of " + str(self.download_max)
def song_download_worker(self, songs):
for song in songs:
sync.download_song(song)
self.status_counter_increase()
def download_thread(self):
# split for multiple playlist support
playlists = self.playlist.text.split(",")
# loop for multiple playlists
for playlist in playlists:
# retrieve playlist
playlist_id = playlist.strip()
# make sure it's a real playlist ID and not blank
if len(playlist_id) == 0:
continue
# retrieve songs from SynthRiderz
self.set_status_text("Retrieving downloads for " + playlist_id)
# retrieve songs from bsaber for the current page
playlist_name, songs = sync.get_sr_songs(playlist_id)
num_songs = len(songs) if songs else 0
# if there are songs, then download and extract them
if num_songs > 0:
threads = []
self.status_counter_reset(num_songs)
# if there are more songs than the thread count, then split it up
if num_songs >= DOWNLOAD_THREAD_COUNT:
chunk_size = math.floor(num_songs / DOWNLOAD_THREAD_COUNT)
for i in range(0, DOWNLOAD_THREAD_COUNT):
worker_song_list = []
if i == DOWNLOAD_THREAD_COUNT - 1:
worker_song_list = songs[(chunk_size * i):] # assign remaining songs to last worker
else:
worker_song_list = songs[(chunk_size * i):(chunk_size * (i + 1))] # extract chunk of songs for each worker
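# e.g. 20 songs across 6 threads -> chunk_size = 3; workers 0-4 take 3 songs each and the last worker takes the remaining 5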
threads.append(threading.Thread(target = (self.song_download_worker), args=[worker_song_list]))
threads[-1].start()
time.sleep(THREAD_SPAWN_DELAY)
else:
# if not, just assign all songs to one thread
threads.append(threading.Thread(target = (self.song_download_worker), args=[songs]))
threads[-1].start()
# wait for workers to finish
for thread in threads:
thread.join()
else:
self.set_status_text(playlist_id + " has no songs!")
time.sleep(3)
# create playlist for this playlist
sync.create_playlist(playlist_id, playlist_name)
# done sync
self.set_status_text("Songs synchronized!")
self.set_in_progress(False)
# called when the synchronize button is pressed
def sync(self):
if len(self.playlist.text) == 0:
self.set_status_text("Please enter playlist ID!")
return
# check if path exists
sync.safe_path_check()
# save user input
sync.save_playlists(self.playlist.text)
# lock the buttons and start the master download thread
self.set_in_progress(True)
threading.Thread(target = (self.download_thread)).start()
# called when the exit button is pressed
def exit(self):
exit()
# the main application construction
class MainApp(MDApp):
def build(self):
self.theme_cls.primary_palette = "Teal"
return MainScreen()
if __name__ == "__main__":
# make sure permissions are satisfied
request_permissions([Permission.INTERNET, Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE])
# run the app
MainApp().run() |
Receive_MainEnvironmentSimulator.py | #Receive_MainEnvironmentSimulator.py
#Environment simulator + REST-Server + AdvantEDGE
#Version:1
#Date:2020-03-03
#Author: Jaime Burbano
#Description: This set of files is used to run the first simulation of the system using AdvantEDGE
#runs with python 3
#python3 MainEnvironmentSimulator v002
import argparse
import threading
import time
import queue
from datetime import datetime
import ScenarioReader #Reads the scenario.json file and gets the individual data
import VehicleMovemenManager as movement #Converts the speed of the car to X ms/s
import PoAManager#Manages the change of PoA
import logging
import TCP_Message_manager as manage
import socket
import sys
#Receives as an argument the ID of the vehicle
parser = argparse.ArgumentParser()
parser.add_argument("Vehicle_ID", help="type the vehicle ID (V001,V002...)")
args = parser.parse_args()
App_ID=args.Vehicle_ID #the given Vehicle ID must match with the one of the scenario
#Configuration of the logging file
log_path='/home/jaime/Desktop/code/AdvantEDGE/2020-04-11/code/HNA_Individual_Test_Case/loggers/'
log_file_name='HNA_TC1_TEST.log'
log_file=log_path+log_file_name
logging.basicConfig(level=logging.INFO, filename=log_file, filemode='a', format='%(name)s - %(levelname)s - %(message)s')
"""
SCRIPT VARIABLES
"""
server_ports=[30151,30152,30153,30154,30155,30156,30157,30158] #contains the port of each server the app connects to beforehand
end_flag=1 #Determines when to finish the simulation
synch_delay=1 #time to wait in minutes before running the script in order to synch all the clients
simulation_speed=10 #simulation step in ms for the vehicle movement. This value must match the time.sleep of the movement thread
msg="close_comm" #message sent to the server when closing the socket (server must have the same config)
"""
QUEUE DECLARATION
"""
q_PoA = queue.LifoQueue() #Queue used to notify the thread_PoA_change
q_receive= queue.Queue() #Queue used to notify the thread_receiver
"""
SETTING THE SYSTEM
"""
my_car_info=ScenarioReader.car_info(App_ID)
my_poa_info=ScenarioReader.poa_info()
vehicle_speed=my_car_info.get_vehicle_speed()
vehicle_init_pos= my_car_info.get_car_initPosition()
max_Distance=my_poa_info.get_coverage_area()
number_APs= my_poa_info.get_number_APs()
myPoA_manager=PoAManager.PoA_Manager(max_Distance,number_APs ) #creates an object of the PoA_Manager class
"""
REGISTERING THE VEHICLE AND SETTING THE SCENARIO
"""
print ("simulating scenario for vehicle:" , App_ID)
print("poA limits: ", myPoA_manager.get_coord_coverage())
print ("the PoA is: ", myPoA_manager.Determine_AP (vehicle_init_pos))
myPoA_manager.change_PoA(App_ID, myPoA_manager.Determine_AP (vehicle_init_pos)) #call mobility event on AdvantEDGE
#Function to open the socket with a given server and start listening to it
def ConnectServers(port, host, debug=True):
# create an instance of socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host, port) #set of ports
sock.connect(server_address)
if debug:
#print(datetime.now())
print('CONNECTED TO SERVER ', host, port, '\n')
while True:
try:
# try to receive data from the client
server_data = sock.recv(1024).decode('utf-8')
if server_data:
#All listener threads put the data in the same queue and just 1 reader gets from there
q_receive.put(server_data) #if we receive any data we put in a queue so the consumer can read it
except:
print ("closing connection")
sock.sendall(msg.encode('utf-8'))
sock.close()
return False
#Function to consume the incomming data from the server(s)
def getdata( vehicle_ID):
myHazardList=['v0'] #initialize the variable
while True:
data_obtained=q_receive.get() #get the data from the common queue
received_time= time.time()#takes the receiving time
data = manage.getTCPdata(data_obtained) #process the header of the data
position=get_car_position() #gets the position of the car
dict_data=eval(data[0]) #Converts the received data to a dictionary
#print("received_",dict_data)
if dict_data["id"][0:4] != vehicle_ID: #Validate if the ID of the received message is equal to the actual car ID
#print (vehicle_ID, "-->", dict_data)
print ("rec: ",dict_data["id"],dict_data["sn"]) #if not the same vehicle, print the incomming hazard
#Check if the hazard has already been received from any other server
log_flag = dict_data["id"] not in myHazardList
if log_flag:
myHazardList.append(dict_data["id"]) #put the new data in the internal list
logging.info("*{'V_ID':'%s','rec_time':'%s','h_ID':'%s','type':'%s', 'location':%s, 'server':'%s'}", vehicle_ID,received_time,dict_data["id"], dict_data['ht'],position,dict_data['sn'])
data.clear() #Clean the buffer because the data has already been consumed
#Function used to create individual threads to connect to each server
def CreateConnection(address, port):
x0_ThreadedClient = threading.Thread(target=ConnectServers, args=(port,address,))
x0_ThreadedClient.daemon = True
x0_ThreadedClient.start()
#Function used to create the listener thread
def CreateListener(App_ID):
x0_ThreadedData = threading.Thread(target=getdata, args=(App_ID,))
x0_ThreadedData.daemon = True
x0_ThreadedData.start()
#Thread to control the movement of the vehicle
def thread_vehicle_movement(vehicle_ID, vehicle_init_pos, vehicle_speed):
global end_flag
global actual_position_car
global a1
c=0
a0=1 #used to detect when the PoA changes
a1=1 #used to detect when the PoA changes
m_Xms=movement.speed_m_mseg(vehicle_speed,simulation_speed) #Change the 2nd parameter to the number of ms
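# m_Xms is the distance covered per simulation step; for example, a vehicle moving 10 m/s with a 10 ms step advances 0.1 m per loop iteration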
actual_position_car=vehicle_init_pos
while True:
start=time.time()
#determine if the simulation must be finished
if actual_position_car> max_Distance:
print ('---------simulation finished---------')
end_flag=0 #sets the flag to close the main file also
break
a1=myPoA_manager.Determine_AP (actual_position_car) #Determine current PoA
if a1!=a0: #Execute only when the current AP differs from the previous one
q_PoA.put(a1) #Call change of PoA thread
a0=a1 #Update value of the last registered AP
actual_position_car=actual_position_car+m_Xms #moves the car with real speed each Xms
#TODO: make sure the complete loop takes 10ms --> use waitUntil
#print (actual_position_car)
time.sleep(0.01) #should be 10ms --> make sure it is always the same
end=time.time()
#print ("time: ", end-start)
#Thread to trigger the AP switching
def thread_PoA_change(vehicle_ID):
global end_flag
while True:
PoA=q_PoA.get()
myPoA_manager.change_PoA(vehicle_ID, PoA) #call mobility event on AdvantEDGE
if end_flag==0:
break
time.sleep(0.01)
#Returns the current position of the vehicle
def get_car_position():
global actual_position_car
car_position=actual_position_car
return car_position
#--------------------------------------------------------------
#MAIN
#--------------------------------------------------------------
if __name__ == "__main__":
#SET HERE THE START TIME OF THE SIMULATION --> synch_delay MINUTES AFTER THE CURRENT TIME
a = datetime.now()
actual_time= datetime.now().minute
if actual_time==59:#validates the time in case it must start at 00 of the next hour
d= datetime(datetime.now().year, datetime.now().month, datetime.now().day,datetime.now().hour+1 ,0)
else:
execution_time = actual_time+synch_delay
d= datetime(datetime.now().year, datetime.now().month, datetime.now().day,datetime.now().hour ,execution_time)
print("init time: ",d )
#CREATING TCP CONNECTION TO ALL THE REGISTERED SERVERS
#Iterates through the given servers and creates a socket to connect and listen to each one
for port in server_ports:
print ("connecting to server: ", port)
CreateConnection('127.0.0.1', port) #Creates an individual thread to listen each server
CreateListener(App_ID) #Creates the listener thread
while True: #stays in the loop until the program must start
a = datetime.now()
if d<=a :
print (a,"---------RECEIVER------------", d )
break
time.sleep(0.001)
#STARTING THE MOVEMENT THREADS
x0_vehicle_movement = threading.Thread(target=thread_vehicle_movement, args=(App_ID,vehicle_init_pos,vehicle_speed))
x0_vehicle_movement.daemon = True
x0_vehicle_movement.start()
x0_PoA_change = threading.Thread(target=thread_PoA_change, args=(App_ID,))
x0_PoA_change.daemon = True
x0_PoA_change.start()
while True:
if end_flag==0:
break
time.sleep(0.2)
|
algorunner.py | from __future__ import print_function, division, absolute_import
import time
from hkube_python_wrapper.util.DaemonThread import DaemonThread
from .statelessAlgoWrapper import statelessAlgoWrapper
from ..config import config
from .wc import WebsocketClient
from .data_adapter import DataAdapter
from .job import Job
from .methods import methods
from .messages import messages
from hkube_python_wrapper.tracing import Tracer
from hkube_python_wrapper.codeApi.hkube_api import HKubeApi
from hkube_python_wrapper.communication.DataServer import DataServer
from hkube_python_wrapper.communication.streaming.StreamingManager import StreamingManager
from hkube_python_wrapper.util.queueImpl import Queue, Empty
from hkube_python_wrapper.util.timerImpl import Timer
from hkube_python_wrapper.util.logger import log
from hkube_python_wrapper.util.stdout_redirector import stdout_redirector
import os
import sys
import importlib
import traceback
from threading import Thread, current_thread
class Algorunner(DaemonThread):
def __init__(self):
self._url = None
self._originalAlgorithm = dict()
self._statelessWrapped = dict()
self._algorithm = None
self._input = None
self._job = None
self._loadAlgorithmError = None
self._connected = False
self._hkubeApi = None
self.streamingManager = None
self._msg_queue = Queue()
self._dataAdapter = None
self._dataServer = None
self._discovery = None
self._wsc = None
self._tracer = None
self._storage = None
self._active = True
self._runningStartThread = None
self._stopped = False
self._redirectLogs = False
DaemonThread.__init__(self, "WorkerListener")
@staticmethod
def Run(start=None, init=None, stop=None, exit=None, options=None):
"""Starts the algorunner wrapper.
Convenience method to start the algorithm. Pass the algorithm methods
This method blocks forever
Args:
start (function): The entry point of the algorithm. Called for every invocation.
init (function): Optional init method. Called for every invocation before the start.
stop (function): Optional stop method. Called when the parent pipeline is stopped.
exit (function): Optional exit handler. Called before the algorithm is forced to exit.
Can be used to clean up resources.
Returns:
Never returns.
"""
algorunner = Algorunner()
if (start):
algorunner.loadAlgorithmCallbacks(
start, init=init, stop=stop, exit=exit, options=options or config)
else:
algorunner.loadAlgorithm(config)
jobs = algorunner.connectToWorker(config)
for j in jobs:
j.join()
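# Typical library usage (sketch only; `my_algo_start` is a hypothetical user function):
# def my_algo_start(args, hkube_api):
#     return {'result': args['input']}
# Algorunner.Run(start=my_algo_start)   # blocks forever, serving invocations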
@staticmethod
def Debug(debug_url, start, init=None, stop=None, exit=None, options=None, logs=False):
"""Starts the algorunner wrapper and registers for local run.
Convenience method to start the algorithm. Pass the algorithm methods
This method blocks forever
Args:
debug_url (string) The websocket url of the debug algorithm in the kubernetes cluster.
start (function): The entry point of the algorithm. Called for every invocation.
init (function): Optional init method. Called for every invocation before the start.
stop (function): Optional stop method. Called when the parent pipeline is stopped.
exit (function): Optional exit handler. Called before the algorithm is forced to exit.
Can be used to clean up resources.
logs (bool): Optional. If True will send logs to the cluster. Default: False
Returns:
Never returns.
"""
algorunner = Algorunner()
options = options or config
options.socket['url'] = debug_url
options.storage['mode'] = 'v1'
options.discovery["enable"] = False
options.algorithm['redirectLogs'] = logs
algorunner.loadAlgorithmCallbacks(start, init=init, stop=stop, exit=exit, options=options)
jobs = algorunner.connectToWorker(config)
for j in jobs:
j.join()
def loadAlgorithmCallbacks(self, start, init=None, stop=None, exit=None, options=None):
try:
log.info('Initializing algorithm callbacks')
self._originalAlgorithm['start'] = start
self._originalAlgorithm['init'] = init
self._originalAlgorithm['stop'] = stop
self._originalAlgorithm['exit'] = exit
for k, v in methods.items():
methodName = k
method = v
isMandatory = method["mandatory"]
if self._originalAlgorithm[methodName] is not None:
log.info('found method {methodName}', methodName=methodName)
else:
mandatory = "mandatory" if isMandatory else "optional"
error = 'unable to find {mandatory} method {methodName}'.format(
mandatory=mandatory, methodName=methodName)
if (isMandatory):
raise Exception(error)
log.warning(error)
# fix start if it has only one argument
if start.__code__.co_argcount == 1:
self._originalAlgorithm['start'] = lambda args, api: start(args)
self._wrapStateless()
self._tracer = Tracer(options.tracer)
except Exception as e:
self._loadAlgorithmError = self._errorMsg(e)
log.error(e)
@staticmethod
def _getEntryPoint(entry):
splits = os.path.splitext(entry)
entryPoint = splits[0] if splits[-1] == '.py' else entry
entryPoint = entryPoint.replace("/", ".")
return entryPoint
def loadAlgorithm(self, options):
try:
cwd = os.getcwd()
algOptions = options.algorithm
package = algOptions["path"]
entry = algOptions["entryPoint"]
entryPoint = Algorunner._getEntryPoint(entry)
__import__(package)
os.chdir('{cwd}/{package}'.format(cwd=cwd, package=package))
log.info('loading {entry}', entry=entry)
mod = importlib.import_module('.{entryPoint}'.format(
entryPoint=entryPoint), package=package)
log.info('algorithm code loaded')
for k, v in methods.items():
methodName = k
method = v
isMandatory = method["mandatory"]
try:
self._originalAlgorithm[methodName] = getattr(mod, methodName)
# fix start if it has only one argument
if methodName == 'start' and self._originalAlgorithm['start'].__code__.co_argcount == 1:
self._originalAlgorithm['startOrig'] = self._originalAlgorithm['start']
self._originalAlgorithm['start'] = lambda args, api: self._originalAlgorithm['startOrig'](
args)
log.info('found method {methodName}', methodName=methodName)
except Exception as e:
mandatory = "mandatory" if isMandatory else "optional"
error = 'unable to find {mandatory} method {methodName}'.format(
mandatory=mandatory, methodName=methodName)
if (isMandatory):
raise Exception(error)
log.warning(error)
self._wrapStateless()
self._tracer = Tracer(options.tracer)
except Exception as e:
self._loadAlgorithmError = self._errorMsg(e)
traceback.print_exc()
log.error(e)
def _wrapStateless(self):
wrapper = statelessAlgoWrapper(self._originalAlgorithm)
self._statelessWrapped['start'] = wrapper.start
self._statelessWrapped['init'] = wrapper.init
self._statelessWrapped['stop'] = wrapper.stop
self._statelessWrapped['exit'] = wrapper.exit
def connectToWorker(self, options):
socket = options.socket
encoding = socket.get("encoding")
self._storage = options.storage.get("mode")
self._redirectLogs = options.algorithm.get("redirectLogs")
url = socket.get("url")
if (url is not None):
self._url = url
else:
self._url = '{protocol}://{host}:{port}'.format(**socket)
self._url += '?storage={storage}&encoding={encoding}'.format(
storage=self._storage, encoding=encoding)
self._wsc = WebsocketClient(self._msg_queue, encoding, self._url)
self._initDataAdapter(options)
self.streamingManager = StreamingManager()
self._hkubeApi = HKubeApi(self._wsc, self, self._dataAdapter, self._storage, self.streamingManager)
self._registerToWorkerEvents()
log.info('connecting to {url}', url=self._url)
self._wsc.start()
self.start()
return [self._wsc, self]
def handle(self, command, data):
if command == messages.incoming.initialize:
self._init(data)
elif command == messages.incoming.start:
self._start(data)
elif command == messages.incoming.stop:
self._stopAlgorithm(data)
elif command == messages.incoming.streamingInMessage:
self.streamingManager.onMessage(messageFlowPattern=None, msg=data['payload'], origin=data['origin'], sendMessageId=data['sendMessageId'])
self._wsc.send({'command': messages.outgoing.streamingInMessageDone, 'data': {'sendMessageId': data['sendMessageId']}})
elif command == messages.incoming.serviceDiscoveryUpdate:
self._discovery_update(data)
elif command == messages.incoming.exit:
# call exit on different thread to prevent deadlock
Timer(0.1, lambda: self._exit(data), name="Exit timer").start()
elif command in [messages.incoming.algorithmExecutionDone, messages.incoming.algorithmExecutionError]:
self._hkubeApi.algorithmExecutionDone(data)
elif command in [messages.incoming.subPipelineDone, messages.incoming.subPipelineError, messages.incoming.subPipelineStopped]:
self._hkubeApi.subPipelineDone(data)
elif command == messages.incoming.dataSourceResponse:
self._hkubeApi.dataSourceResponse(data)
def get_message(self, blocking=True):
return self._msg_queue.get(block=blocking, timeout=0.1)
def run(self):
while self._active:
try:
(command, data) = self.get_message()
runThread = Thread(name=command + "Thread", target=self.handle, args=[command, data])
runThread.daemon = True
runThread.start()
except Empty:
pass
log.info('Exiting run loop')
def close(self):
if (self._wsc):
self._wsc.shutDown()
self._active = False
def getCurrentJob(self):
return self._job
def _initDataServer(self, options):
enable = options.discovery.get("enable")
if (enable and self._storage != 'v1' and self._job is not None):
if (self._job.isStreaming and self._dataServer is not None):
self._dataServer.shutDown()
self._dataServer = None
elif (not self._job.isStreaming and self._dataServer is None):
self._discovery = {
'host': options.discovery.get("host"),
'port': options.discovery.get("port")
}
self._dataServer = DataServer(options.discovery)
self._dataServer.listen()
self._reportServing(interval=options.discovery.get("servingReportInterval"))
def _initDataAdapter(self, options):
self._dataAdapter = DataAdapter(options, self._dataServer)
def _registerToWorkerEvents(self):
self._wsc.events.on_connection += self._connection
self._wsc.events.on_disconnect += self._disconnect
def _connection(self):
self._connected = True
log.info('connected to {url}', url=self._url)
def _disconnect(self):
if self._connected:
log.info('disconnected from {url}', url=self._url)
self._connected = False
def _log_message(self, data):
try:
self._sendCommand(messages.outgoing.logData, data)
except Exception:
pass
def _getMethod(self, name):
return self._algorithm.get(name)
def _init(self, options):
redirector = None
try:
if (self._redirectLogs):
redirector = stdout_redirector()
redirector.events.on_data += self._log_message
if (self._loadAlgorithmError):
self.sendError(self._loadAlgorithmError)
else:
self._input = options
self._job = Job(options)
if self._job.isStreaming and not self._job.isStateful:
self._algorithm = self._statelessWrapped
else:
self._algorithm = self._originalAlgorithm
method = self._getMethod('init')
if (method is not None):
method(options)
self._sendCommand(messages.outgoing.initialized, None)
except Exception as e:
self.sendError(e)
finally:
if (redirector):
redirector.flush()
redirector.events.on_data -= self._log_message
redirector.cleanup()
def _discovery_update(self, discovery):
log.debug('Got discovery update {discovery}', discovery=discovery)
messageListenerConfig = {'encoding': config.discovery['encoding']}
self.streamingManager.setupStreamingListeners(
messageListenerConfig, discovery, self._job.nodeName)
def _setupStreamingProducer(self, nodeName):
def onStatistics(statistics):
self._sendCommand(messages.outgoing.streamingStatistics, statistics)
producerConfig = {}
producerConfig["port"] = config.discovery['streaming']['port']
producerConfig['messagesMemoryBuff'] = config.discovery['streaming']['messagesMemoryBuff']
producerConfig['encoding'] = config.discovery['encoding']
producerConfig['statisticsInterval'] = config.discovery['streaming']['statisticsInterval']
self.streamingManager.setupStreamingProducer(
onStatistics, producerConfig, self._job.childs, nodeName)
def _start(self, options):
if (self._job.isStreaming):
self.streamingManager.setParsedFlows(self._job.parsedFlow, self._job.defaultFlow)
if (self._job.childs):
self._setupStreamingProducer(self._job.nodeName)
self.streamingManager.clearMessageListeners()
# pylint: disable=unused-argument
span = None
self._initDataServer(config)
self._runningStartThread = current_thread()
self._stopped = False
redirector = None
try:
self._sendCommand(messages.outgoing.started, None)
if (self._redirectLogs):
redirector = stdout_redirector()
redirector.events.on_data += self._log_message
# TODO: add parent span from worker
jobId = self._job.jobId
taskId = self._job.taskId
nodeName = self._job.nodeName
info = self._job.info or {}
savePaths = info.get("savePaths", [])
if (options):
topSpan = options.get('spanId')
else:
topSpan = self._job.spanId
span = Tracer.instance.create_span("start", topSpan, jobId, taskId, nodeName)
newInput = self._dataAdapter.getData(self._input)
self._input.update({'input': newInput})
method = self._getMethod('start')
algorithmData = method(self._input, self._hkubeApi)
if not (self._stopped):
self._handle_response(algorithmData, jobId, taskId, nodeName, savePaths, span)
except Exception as e:
traceback.print_exc()
Tracer.instance.finish_span(span, e)
self.sendError(e)
finally:
if (redirector):
redirector.flush()
redirector.events.on_data -= self._log_message
redirector.cleanup()
self._runningStartThread = None
def _handle_response(self, algorithmData, jobId, taskId, nodeName, savePaths, span):
if (self._storage == 'v3'):
self._handle_responseV2_V3(algorithmData, jobId, taskId, nodeName, savePaths, span)
else:
self._handle_responseV1(algorithmData, span)
def _handle_responseV1(self, algorithmData, span):
if (span):
Tracer.instance.finish_span(span)
self._sendCommand(messages.outgoing.done, algorithmData)
def _handle_responseV2_V3(self, algorithmData, jobId, taskId, nodeName, savePaths, span):
header, encodedData = self._dataAdapter.encode(algorithmData)
data = {
'jobId': jobId,
'taskId': taskId,
'nodeName': nodeName,
'data': algorithmData,
'encodedData': encodedData,
'savePaths': savePaths
}
storingData = dict()
storageInfo = self._dataAdapter.createStorageInfo(data)
storingData.update(storageInfo)
incache = None
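        # when the local data server holds this result in memory, advertise our
        # discovery endpoint and report storing before persisting; otherwise
        # persist first and only then report storing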
if (self._dataServer and savePaths):
incache = self._dataServer.setSendingState(taskId, header, encodedData, len(encodedData))
if (incache):
storingData.update({'discovery': self._discovery, 'taskId': taskId})
self._sendCommand(messages.outgoing.storing, storingData)
self._dataAdapter.setData({'jobId': jobId, 'taskId': taskId, 'header': header, 'data': encodedData})
else:
self._dataAdapter.setData({'jobId': jobId, 'taskId': taskId, 'header': header, 'data': encodedData})
self._sendCommand(messages.outgoing.storing, storingData)
if (span):
Tracer.instance.finish_span(span)
if (self._job.isStreaming):
self._hkubeApi.stopStreaming(force=False)
self._sendCommand(messages.outgoing.done, None)
def _reportServing(self, interval=None):
if (interval is None):
return
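        # the worker reports the interval in milliseconds; Timer expects seconds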
interval = interval / 1000
def reportInterval():
if (not self._active or not self._dataServer):
return
self._reportServingStatus()
Timer(interval, reportInterval, name='reportIntervalTimer').start()
reportInterval()
def _reportServingStatus(self):
if (self._dataServer):
isServing = self._dataServer.isServing()
if (isServing):
self._sendCommand(messages.outgoing.servingStatus, True)
def _stopAlgorithm(self, options):
if (self._stopped):
log.warning('Got stop command while already stopping')
else:
self._stopped = True
try:
method = self._getMethod('stop')
if (method is not None):
method(options)
forceStop = options.get('forceStop', False)
if (forceStop is True):
log.info('stopping using force flag')
else:
log.info('stopping gracefully')
if (self._job.isStreaming):
if (forceStop is False):
stoppingState = True
def stopping():
while (stoppingState):
self._sendCommand(messages.outgoing.stopping, None)
time.sleep(1)
stoppingThread = Thread(target=stopping)
stoppingThread.start()
self._hkubeApi.stopStreaming(force=forceStop)
stoppingState = False
stoppingThread.join()
log.info('Joined threads send stopping and stop streaming')
self._checkQueueSize(event='scale down')
else:
self._hkubeApi.stopStreaming(force=forceStop)
if (self._runningStartThread):
self._runningStartThread.join()
log.info('Joined threads algorithm and stop algorithm')
self._sendCommand(messages.outgoing.stopped, None)
except Exception as e:
self.sendError(e)
def _exit(self, options):
try:
            if (self._dataServer):
                self._dataServer.shutDown()
self._wsc.shutDown()
method = self._getMethod('exit')
if (method is not None):
method(options)
self._checkQueueSize(event='exit')
option = options if options is not None else dict()
code = option.get('exitCode', 0)
self._active = False
log.info('Got exit command. Exiting with code {code}', code=code)
sys.exit(code)
except Exception as e:
log.error('Got error during exit: {e}', e=e)
# pylint: disable=protected-access
os._exit(0)
def _checkQueueSize(self, event):
if (self._job.isStreaming):
if (self.streamingManager.messageProducer):
try:
log.info('Messages left in queue on {event}={queue}', event=event, queue=str(len(self.streamingManager.messageProducer.adapter.messageQueue.queue)))
except Exception:
log.error('Failed to print number of messages left in queue on {event}', event=event)
else:
log.info('MessageProducer already None on {event}', event=event)
def _sendCommand(self, command, data):
try:
self._wsc.send({'command': command, 'data': data})
except Exception as e:
self.sendError(e)
def sendError(self, error):
try:
log.error(error)
self._wsc.send({
'command': messages.outgoing.error,
'error': {
'code': 'Failed',
'message': self._errorMsg(error)
}
})
if (self._job.isStreaming):
self._hkubeApi.stopStreaming(False)
except Exception as e:
log.error(e)
def _errorMsg(self, error):
return str(error)
|
reporting_server.py | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import multiprocessing
import os
import re
import signal
import socket
import sys
from pants import binary_util
from pants.backend.core.tasks.task import QuietTaskMixin, Task
from pants.base.build_environment import get_buildroot
from pants.base.run_info import RunInfo
from pants.reporting.reporting_server import ReportingServer, ReportingServerManager
class RunServer(Task, QuietTaskMixin):
"""Runs the reporting server."""
@classmethod
def register_options(cls, register):
super(RunServer, cls).register_options(register)
register('--port', type=int, default=0,
help='Serve on this port. Leave unset to choose a free port '
'automatically (recommended if using pants concurrently in '
'multiple workspaces on the same host).')
register('--allowed-clients', action='append', default=['127.0.0.1'],
help='Only requests from these IPs may access this server. Useful for '
'temporarily showing build results to a colleague. The special '
'value ALL means any client may connect. Use with caution, as '
'your source code is exposed to all allowed clients!')
register('--open', action='store_true', default=False,
help='Attempt to open the server web ui in a browser.')
def execute(self):
DONE = '__done_reporting'
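    # sentinel the child pushes onto the queue once launch reporting is complete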
def maybe_open(port):
if self.get_options().open:
binary_util.ui_open('http://localhost:%d' % port)
port = ReportingServerManager.get_current_server_port()
if port:
maybe_open(port)
print('Server already running at http://localhost:%d' % port, file=sys.stderr)
return
def run_server(reporting_queue):
def report_launch(actual_port):
reporting_queue.put(
'Launching server with pid %d at http://localhost:%d' % (os.getpid(), actual_port))
def done_reporting():
reporting_queue.put(DONE)
try:
# We mustn't block in the child, because the multiprocessing module enforces that the
        # parent either kills or joins it. Instead we fork a grandchild that inherits the queue
# but is allowed to block indefinitely on the server loop.
if not os.fork():
# Child process.
info_dir = RunInfo.dir(self.context.config)
# If these are specified explicitly in the config, use those. Otherwise
# they will be None, and we'll use the ones baked into this package.
template_dir = self.context.config.get('reporting', 'reports_template_dir')
assets_dir = self.context.config.get('reporting', 'reports_assets_dir')
settings = ReportingServer.Settings(info_dir=info_dir, template_dir=template_dir,
assets_dir=assets_dir, root=get_buildroot(),
allowed_clients=self.get_options().allowed_clients)
server = ReportingServer(self.get_options().port, settings)
actual_port = server.server_port()
ReportingServerManager.save_current_server_port(actual_port)
report_launch(actual_port)
done_reporting()
# Block forever here.
server.start()
except socket.error:
done_reporting()
raise
# We do reporting on behalf of the child process (necessary, since reporting may be buffered in
# a background thread). We use multiprocessing.Process() to spawn the child so we can use that
# module's inter-process Queue implementation.
reporting_queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=run_server, args=[reporting_queue])
proc.daemon = True
proc.start()
s = reporting_queue.get()
while s != DONE:
print(s, file=sys.stderr)
s = reporting_queue.get()
# The child process is done reporting, and is now in the server loop, so we can proceed.
server_port = ReportingServerManager.get_current_server_port()
maybe_open(server_port)
class KillServer(Task, QuietTaskMixin):
"""Kills the reporting server."""
pidfile_re = re.compile(r'port_(\d+)\.pid')
def execute(self):
pidfiles_and_ports = ReportingServerManager.get_current_server_pidfiles_and_ports()
    if not pidfiles_and_ports:
      print('No server found.', file=sys.stderr)
      return
# There should only be one pidfile, but in case there are many, we kill them all here.
for pidfile, port in pidfiles_and_ports:
with open(pidfile, 'r') as infile:
pidstr = infile.read()
try:
os.unlink(pidfile)
pid = int(pidstr)
os.kill(pid, signal.SIGKILL)
print('Killed server with pid %d at http://localhost:%d' % (pid, port), file=sys.stderr)
except (ValueError, OSError):
pass
|
tincam.py | from dropbox import Dropbox
from picamera import PiCamera as Camera
from gpiozero import Button
from datetime import datetime
import os
import time
import threading
button = Button(4)
camera = Camera()
upload_queue = []
queue_lock = threading.Lock()
DBX_API_KEY = 'YOUR_DROPBOX_API_KEY_HERE'
dbx = Dropbox(DBX_API_KEY)
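# upload_queue is a simple producer/consumer handoff: the capture loops append
# filenames under queue_lock and upload_worker pops each one once it uploads.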
def wait_for_press():
    input('press: ')
def wait_for_release():
    input('release: ')
def main():
    upload_thread = threading.Thread(target=upload_worker)
    connect_thread = threading.Thread(target=dropbox_connect_worker)
    # daemon threads so the background workers don't keep the process alive
    upload_thread.daemon = True
    connect_thread.daemon = True
    upload_thread.start()
    connect_thread.start()
while True:
print('Press for photo capture stream')
print('Hold and release for video')
button.wait_for_press()
startTime = time.time()
button.wait_for_release()
elapsedTime = time.time() - startTime
if elapsedTime > 1:
print('Video capture started, press to halt')
process_video_capture()
else:
print('Photo stream started, press to halt')
process_photo_stream()
def dropbox_connect_worker():
dropbox_connected = False
while dropbox_connected is False:
try:
print('Attempting Dropbox connection...')
dbx.users_get_current_account()
dropbox_connected = True
print('Dropbox connected')
        except Exception:
print('Dropbox failed to connect, retrying...')
time.sleep(3)
def upload_worker():
while True:
if not upload_queue:
time.sleep(2)
continue
target_filename = None
with queue_lock:
target_filename = upload_queue[0]
        print('Attempting to upload ' + target_filename)
        try:
            # files_upload expects the file content as bytes, so read in binary mode
            with open(target_filename, 'rb') as target_file:
                dbx.files_upload(target_file.read(), '/' + target_filename)
            with queue_lock:
                upload_queue.pop(0)
            try:
                os.remove(target_filename)
            except OSError:
                pass
            print('Upload successful')
        except Exception:
            print('Upload failed, will retry later.')
        time.sleep(2)
def get_timestamp():
return datetime.now().isoformat()
def get_filename(extension):
return get_timestamp() + '.' + extension
def process_video_capture():
camera.resolution = (640, 480)
filename = get_filename('h264')
camera.start_recording(filename)
button.wait_for_press()
camera.stop_recording()
button.wait_for_release()
with queue_lock:
upload_queue.append(filename)
print('Video captured to ' + filename)
def process_photo_stream():
camera.resolution = (1280, 1024)
capture_enabled = True
while capture_enabled:
filename = get_filename('jpg')
camera.capture(filename)
with queue_lock:
upload_queue.append(filename)
print('Photo captured to ' + filename)
time.sleep(1)
if button.is_pressed:
capture_enabled = False
button.wait_for_release()
if __name__ == "__main__":
main()
|
manager.py | #!/usr/bin/env python
import os
# check if NEOS update is required
while 1:
if ((not os.path.isfile("/VERSION")
or int(open("/VERSION").read()) < 3)
and not os.path.isfile("/data/media/0/noupdate")):
os.system("curl -o /tmp/updater https://openpilot.comma.ai/updater && chmod +x /tmp/updater && /tmp/updater")
else:
break
import sys
import time
import importlib
import subprocess
import signal
import traceback
import usb1
from multiprocessing import Process
from selfdrive.services import service_list
import hashlib
import zmq
from setproctitle import setproctitle
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.thermal import read_thermal
from selfdrive.registration import register
from selfdrive.version import version
import common.crash as crash
from common.params import Params
from selfdrive.loggerd.config import ROOT
# comment out anything you don't want to run
managed_processes = {
"uploader": "selfdrive.loggerd.uploader",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
# "radard": "selfdrive.controls.radard",
"loggerd": ("loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("logcatd", ["./logcatd"]),
"proclogd": ("proclogd", ["./proclogd"]),
"boardd": ("boardd", ["./boardd"]), # switch to c++ boardd
"ui": ("ui", ["./ui"]),
"visiond": ("visiond", ["./visiond"]),
"sensord": ("sensord", ["./sensord"]), }
running = {}
# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = ['visiond']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'sensord',
'radard',
'visiond',
'proclogd',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes
print "registering", name
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
# ****************** process management functions ******************
def launcher(proc, gctx):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# exec the process
mod.main(gctx)
except KeyboardInterrupt:
cloudlog.info("child %s got ctrl-c" % proc)
except Exception:
    # can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, basestring):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc, gctx))
else:
pdir, pargs = proc
cwd = os.path.dirname(os.path.realpath(__file__))
if pdir is not None:
cwd = os.path.join(cwd, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
else:
running[name].terminate()
# give it 5 seconds to die
running[name].join(5.0)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
running[name].join(15.0)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date > /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
manage_baseui(False)
for name in running.keys():
kill_managed_process(name)
sys.exit(0)
baseui_running = False
def manage_baseui(start):
global baseui_running
if start and not baseui_running:
cloudlog.info("starting baseui")
os.system("am start -n com.baseui/.MainActivity")
baseui_running = True
elif not start and baseui_running:
cloudlog.info("stopping baseui")
os.system("am force-stop com.baseui")
baseui_running = False
# ****************** run loop ******************
def manager_init():
global gctx
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
dirty = subprocess.call(["git", "diff-index", "--quiet", "origin/release", "--"]) != 0
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty)
os.umask(0)
try:
os.mkdir(ROOT, 0777)
except OSError:
pass
# set gctx
gctx = {}
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError, e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output,
returncode=e.returncode)
def manager_thread():
global baseui_running
# now loop
context = zmq.Context()
thermal_sock = messaging.pub_sock(context, service_list['thermal'].port)
health_sock = messaging.sub_sock(context, service_list['health'].port)
cloudlog.info("manager start")
cloudlog.info(dict(os.environ))
start_managed_process("logmessaged")
start_managed_process("logcatd")
start_managed_process("tombstoned")
start_managed_process("uploader")
start_managed_process("ui")
manage_baseui(True)
panda = False
if os.getenv("NOBOARD") is None:
# *** wait for the board ***
panda = wait_for_device() == 0x2300
# flash the device
if os.getenv("NOPROG") is None:
# checkout the matching panda repo
rootdir = os.path.dirname(os.path.abspath(__file__))
system("cd %s && git submodule init" % rootdir)
system("cd %s && git submodule update" % rootdir)
# flash the board
boarddir = os.path.dirname(os.path.abspath(__file__))+"/../panda/board/"
mkfile = "Makefile" if panda else "Makefile.legacy"
print "using", mkfile
system("cd %s && make -f %s" % (boarddir, mkfile))
start_managed_process("boardd")
started = False
logger_dead = False
count = 0
# set 5 second timeout on health socket
# 5x slower than expected
health_sock.RCVTIMEO = 5000
while 1:
# get health of board, log this in "thermal"
td = messaging.recv_sock(health_sock, wait=True)
print td
# replace thermald
msg = read_thermal()
# loggerd is gated based on free space
statvfs = os.statvfs(ROOT)
avail = (statvfs.f_bavail * 1.0)/statvfs.f_blocks
# thermal message now also includes free space
msg.thermal.freeSpace = avail
with open("/sys/class/power_supply/battery/capacity") as f:
msg.thermal.batteryPercent = int(f.read())
with open("/sys/class/power_supply/battery/status") as f:
msg.thermal.batteryStatus = f.read().strip()
thermal_sock.send(msg.to_bytes())
print msg
# TODO: add car battery voltage check
max_temp = max(msg.thermal.cpu0, msg.thermal.cpu1,
msg.thermal.cpu2, msg.thermal.cpu3) / 10.0
# uploader is gated based on the phone temperature
if max_temp > 85.0:
cloudlog.info("over temp: %r", max_temp)
kill_managed_process("uploader")
elif max_temp < 70.0:
start_managed_process("uploader")
if avail < 0.05:
logger_dead = True
# start constellation of processes when the car starts
# with 2% left, we killall, otherwise the phone is bricked
if td is not None and td.health.started and avail > 0.02:
if not started:
Params().car_start()
started = True
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
manage_baseui(False)
else:
manage_baseui(True)
started = False
logger_dead = False
for p in car_started_processes:
kill_managed_process(p)
    # shutdown if the battery gets lower than 5%, we aren't running, and we are discharging
if msg.thermal.batteryPercent < 5 and msg.thermal.batteryStatus == "Discharging":
os.system('LD_LIBRARY_PATH="" svc power shutdown')
# check the status of baseui
baseui_running = 'com.baseui' in subprocess.check_output(["ps"])
# check the status of all processes, did any of them die?
for p in running:
cloudlog.debug(" running %s %s" % (p, running[p]))
# report to server once per minute
if (count%60) == 0:
cloudlog.event("STATUS_PACKET",
running=running.keys(),
count=count,
health=(td.to_dict() if td else None),
thermal=msg.to_dict())
count += 1
def get_installed_apks():
dat = subprocess.check_output(["pm", "list", "packages", "-3", "-f"]).strip().split("\n")
ret = {}
for x in dat:
if x.startswith("package:"):
v,k = x.split("package:")[1].split("=")
ret[k] = v
return ret
# optional, build the c++ binaries and preimport the python for speed
def manager_prepare():
# build cereal first
subprocess.check_call(["make", "-j4"], cwd="../cereal")
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
for p in managed_processes:
proc = managed_processes[p]
if isinstance(proc, basestring):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
else:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=proc[0])
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.info("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=proc[0])
subprocess.check_call(["make", "-j4"], cwd=proc[0])
# install apks
installed = get_installed_apks()
for app in os.listdir("../apk/"):
if ".apk" in app:
app = app.split(".apk")[0]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed:
apk_path = "../apk/"+app+".apk"
if os.path.isfile(apk_path):
h1 = hashlib.sha1(open(apk_path).read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app]).read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
for do_uninstall in [False, True]:
if do_uninstall:
cloudlog.info("needing to uninstall %s" % app)
os.system("pm uninstall %s" % app)
ret = os.system("cp %s /sdcard/%s.apk && pm install -r /sdcard/%s.apk && rm /sdcard/%s.apk" % (apk_path, app, app, app))
if ret == 0:
break
assert ret == 0
def wait_for_device():
while 1:
try:
context = usb1.USBContext()
for device in context.getDeviceList(skip_on_error=True):
if (device.getVendorID() == 0xbbaa and device.getProductID() == 0xddcc) or \
(device.getVendorID() == 0x0483 and device.getProductID() == 0xdf11):
bcd = device.getbcdDevice()
handle = device.open()
handle.claimInterface(0)
cloudlog.info("found board")
handle.close()
return bcd
except Exception as e:
print "exception", e,
print "waiting..."
time.sleep(1)
def main():
if os.getenv("NOLOG") is not None:
del managed_processes['loggerd']
del managed_processes['tombstoned']
if os.getenv("NOUPLOAD") is not None:
del managed_processes['uploader']
if os.getenv("NOVISION") is not None:
del managed_processes['visiond']
if os.getenv("NOBOARD") is not None:
del managed_processes['boardd']
if os.getenv("LEAN") is not None:
del managed_processes['uploader']
del managed_processes['loggerd']
del managed_processes['logmessaged']
del managed_processes['logcatd']
del managed_processes['tombstoned']
del managed_processes['proclogd']
if os.getenv("NOCONTROL") is not None:
del managed_processes['controlsd']
del managed_processes['radard']
# support additional internal only extensions
try:
import selfdrive.manager_extensions
selfdrive.manager_extensions.register(register_managed_process)
except ImportError:
pass
params = Params()
params.manager_start()
# set unset params
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("IsRearViewMirror") is None:
params.put("IsRearViewMirror", "1")
manager_init()
manager_prepare()
if os.getenv("PREPAREONLY") is not None:
sys.exit(0)
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if __name__ == "__main__":
main()
|
Hiwin_RT605_ArmCommand_Socket_20190627191856.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0' # initial value for the transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # ending the generator here replaces the original `raise StopIteration`,
        # which would surface as a RuntimeError under PEP 479 (Python 3.7+)
        return
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
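# A minimal usage sketch of the switch helper above (values are illustrative
# only; Socket_command below is the real call site):
#
#   for case in switch(cmd):
#       if case(Taskcmd.Action_Type.PtoP):
#           ...  # handle point-to-point motion
#           break
#       if case():  # no args: fall-through default
#           ...
#           break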
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
class ConnManager(object):
    def __init__(self, host, port, timeout):
        self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        # apply the (previously unused) timeout before connecting
        self._conn.settimeout(timeout)
        self._conn.connect((host, port))
def sendOne(self, data):
self._conn.send(data)
def sendTwo(self, data):
self._conn.send(data)
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission --------------##
##--------------- socket transmission of arm commands -----------------
def Socket_command():
global Socket,arm_mode_flag,data
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set arm rapid & safe mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 6 ## reset to the initial mode state
print(data)
print("Socket:", Socket)
        Socket.send(data.encode('utf-8')) # send over the socket; encode str to bytes
##-----------socket client--------
def socket_client():
global Socket
try:
Socket_connect = ConnManager('192.168.0.1',8080,5)
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
print(Socket.recv(1024))
Socket_feedback(Socket)
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
Socket = s
    while 1:
        feedback_str = Socket.recv(1024)
        # the arm side reports its current state
        if str(feedback_str[2]) == '48': # F: arm is Ready to accept the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49': # T: arm is busy and cannot execute the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54': # 6: strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # check the send flag
        if str(feedback_str[4]) == '48': # returned 0: false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49': # returned 1: true
            state_feedback.SentFlag = 1
        ##--------------- socket transmission of arm commands end -----------------
        if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
            break
##-----------socket client end--------
##------------- socket packet transmission end --------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6 ## reset to the initial mode state
    ## multithreading
    t = threading.Thread(target=socket_client)
    t.start() # start the socket client thread
    #time.sleep(1)
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
    t.join()
    ## multithreading end
|
origami.py | from __future__ import division
import sublime, sublime_plugin
import time
import threading
import copy
from functools import partial
XMIN, YMIN, XMAX, YMAX = list(range(4))
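# Sublime describes a window layout as grid lines plus cells: 'rows' and 'cols'
# are lists of fractions in [0.0, 1.0], and each cell is [x0, y0, x1, y1] in
# grid-line indices. For example, a two-column layout is
# {'cols': [0.0, 0.5, 1.0], 'rows': [0.0, 1.0],
#  'cells': [[0, 0, 1, 1], [1, 0, 2, 1]]}.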
try:
# Do not import State directly to not break us in case the MaxPane.max_pane module is reloaded
import MaxPane
except ImportError as error:
print('Origami Error: Could not import the MaxPane package!', error)
class MaxPane(object):
class max_pane(object):
class State(object):
is_fixing_layout = False
def increment_if_greater_or_equal(x, threshold):
if x >= threshold:
return x+1
return x
def decrement_if_greater(x, threshold):
if x > threshold:
return x-1
return x
def pull_up_cells_after(cells, threshold):
return [ [x0,decrement_if_greater(y0, threshold),
x1,decrement_if_greater(y1, threshold)] for (x0,y0,x1,y1) in cells]
def push_right_cells_after(cells, threshold):
return [ [increment_if_greater_or_equal(x0, threshold),y0,
increment_if_greater_or_equal(x1, threshold),y1] for (x0,y0,x1,y1) in cells]
def push_down_cells_after(cells, threshold):
return [ [x0,increment_if_greater_or_equal(y0, threshold),
x1,increment_if_greater_or_equal(y1, threshold)] for (x0,y0,x1,y1) in cells]
def pull_left_cells_after(cells, threshold):
return [ [decrement_if_greater(x0, threshold),y0,
decrement_if_greater(x1, threshold),y1] for (x0,y0,x1,y1) in cells]
def opposite_direction(direction):
opposites = {'up':'down', 'right':'left', 'down':'up', 'left':'right'}
return opposites[direction]
def cells_adjacent_to_cell_in_direction(cells, cell, direction):
fn = None
if direction == 'up':
fn = lambda orig, check: orig[YMIN] == check[YMAX]
elif direction == 'right':
fn = lambda orig, check: orig[XMAX] == check[XMIN]
elif direction == 'down':
fn = lambda orig, check: orig[YMAX] == check[YMIN]
elif direction == 'left':
fn = lambda orig, check: orig[XMIN] == check[XMAX]
if fn:
return [c for c in cells if fn(cell, c)]
return None
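# e.g. with cells [[0, 0, 1, 1], [1, 0, 2, 1]], the cell to the 'right' of
# group 0 is [1, 0, 2, 1], because its XMIN equals group 0's XMAX.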
def fixed_set_layout(window, layout):
#A bug was introduced in Sublime Text 3, sometime before 3053, in that it
#changes the active group to 0 when the layout is changed. Annoying.
MaxPane.max_pane.State.is_fixing_layout = True
active_group = window.active_group()
window.run_command('set_layout', layout)
num_groups = len(layout['cells'])
window.focus_group(min(active_group, num_groups-1))
MaxPane.max_pane.State.is_fixing_layout = False
def fixed_set_layout_no_focus_change(window, layout):
window.run_command('set_layout', layout)
def is_pane_zoomed(window):
return window.settings().get( 'original_panes_layout' )
def maximize_pane(window, fraction):
if fraction:
window.run_command( 'zoom_pane', { 'fraction': fraction } )
else:
window.run_command( 'maximize_pane' )
def unmaximize_pane(window):
settings = window.settings()
if settings.get( 'origami_fraction' ):
window.run_command( 'unzoom_pane' )
else:
window.run_command( 'unmaximize_pane' )
def run_unzoomed(self, target_function):
has_zoom = self.has_zoom()
window = self.window
    if has_zoom:
        unmaximize_pane( window )
def try_create(attempt):
if attempt < 0:
print( "Origami Error: Timed out to create the pane..." )
return
has_zoom = self.has_zoom()
if has_zoom:
sublime.set_timeout( lambda: try_create( attempt - 1 ), 100 )
else:
target_function()
try_create( 15 )
else:
target_function()
class WithSettings:
_settings = None
def settings(self):
if self._settings is None:
self._settings = sublime.load_settings('Origami.sublime-settings')
return self._settings
class PaneCommand(sublime_plugin.WindowCommand, WithSettings):
""" Abstract base class for commands. """
def layout(self):
layout = self.window.layout()
cells = layout['cells']
rows = layout['rows']
cols = layout['cols']
return rows, cols, cells
def get_cells(self):
return self.layout()[2]
def adjacent_cell(self, direction):
cells = self.get_cells()
current_cell = cells[self.window.active_group()]
adjacent_cells = cells_adjacent_to_cell_in_direction(cells, current_cell, direction)
rows, cols, _ = self.layout()
if direction in ['left', 'right']:
MIN, MAX, fields = YMIN, YMAX, rows
else: #up or down
MIN, MAX, fields = XMIN, XMAX, cols
cell_overlap = []
for cell in adjacent_cells:
start = max(fields[cell[MIN]], fields[current_cell[MIN]])
end = min(fields[cell[MAX]], fields[current_cell[MAX]])
overlap = (end - start)# / (fields[cell[MAX]] - fields[cell[MIN]])
cell_overlap.append(overlap)
if len(cell_overlap) != 0:
cell_index = cell_overlap.index(max(cell_overlap))
return adjacent_cells[cell_index]
return None
def duplicated_views(self, original_group, duplicating_group):
original_views = self.window.views_in_group(original_group)
original_buffers = {v.buffer_id() for v in original_views}
potential_dupe_views = self.window.views_in_group(duplicating_group)
dupe_views = []
for view_to_remove in potential_dupe_views:
if view_to_remove.buffer_id() in original_buffers:
dupe_views.append(view_to_remove)
return dupe_views
@classmethod
def tabless_views(cls, window, duplicating_group):
potential_dupe_views = window.views_in_group(duplicating_group)
dupe_views = []
for view_to_remove in potential_dupe_views:
# print('tabless_views file_name', view_to_remove.file_name(), 'size', view_to_remove.size(), 'name', view_to_remove.name(), 'is_dirty', view_to_remove.is_dirty())
if cls.is_tabless( view_to_remove ):
dupe_views.append(view_to_remove)
return dupe_views
@staticmethod
def is_tabless(view):
return view.size() < 1 and view.name() == '' and view.file_name() is None
def travel_to_pane(self, direction, create_new_if_necessary=False):
adjacent_cell = self.adjacent_cell(direction)
if adjacent_cell:
cells = self.get_cells()
new_group_index = cells.index(adjacent_cell)
self.window.focus_group(new_group_index)
elif create_new_if_necessary:
self.create_pane(direction, True)
def carry_file_to_pane(self, direction, create_new_if_necessary=False):
view = self.window.active_view()
        if view is None:
# If we're in an empty group, there's no active view
return
window = self.window
self.travel_to_pane(direction, create_new_if_necessary)
active_group = window.active_group()
views_in_group = window.views_in_group(active_group)
window.set_view_index(view, active_group, len(views_in_group))
sublime.set_timeout(lambda: window.focus_view(view))
def clone_file_to_pane(self, direction, create_new_if_necessary=False):
window = self.window
view = window.active_view()
        if view is None:
# If we're in an empty group, there's no active view
return
group, original_index = window.get_view_index(view)
window.run_command('clone_file')
# If we move the cloned file's tab to the left of the original's,
# then when we remove it from the group, focus will fall to the
# original view.
new_view = window.active_view()
window.set_view_index(new_view, group, original_index)
# Fix the new view's selection and viewport
new_sel = new_view.sel()
new_sel.clear()
for s in view.sel():
new_sel.add(s)
sublime.set_timeout(lambda : new_view.set_viewport_position(view.viewport_position(), False), 0)
self.carry_file_to_pane(direction, create_new_if_necessary)
def reorder_panes(self, leave_files_at_position = True):
_, _, cells = self.layout()
current_cell = cells[self.window.active_group()]
old_index = self.window.active_group()
on_done = partial(self._on_reorder_done, old_index, leave_files_at_position)
view = self.window.show_input_panel('enter new index', str(old_index+1), on_done, None, None)
view.sel().clear()
view.sel().add(sublime.Region(0, view.size()))
def _on_reorder_done(self, old_index, leave_files_at_position, text):
try:
new_index = int(text) - 1
except ValueError:
return
rows, cols, cells = self.layout()
if new_index < 0 or new_index >= len(cells):
return
cells[old_index], cells[new_index] = cells[new_index], cells[old_index]
if leave_files_at_position:
old_files = self.window.views_in_group(old_index)
new_files = self.window.views_in_group(new_index)
for position, v in enumerate(old_files):
self.window.set_view_index(v, new_index, position)
for position, v in enumerate(new_files):
self.window.set_view_index(v, old_index, position)
layout = {'cols': cols, 'rows': rows, 'cells': cells}
fixed_set_layout(self.window, layout)
def resize_panes(self, orientation, mode):
rows, cols, cells = self.layout()
if orientation == 'cols':
data = cols
min1 = YMIN
max1 = YMAX
min2 = XMIN
max2 = XMAX
elif orientation == 'rows':
data = rows
min1 = XMIN
max1 = XMAX
min2 = YMIN
max2 = YMAX
relevant_indx = set()
if mode == 'BEFORE':
current_cell = cells[self.window.active_group()]
relevant_indx.update(set([current_cell[min2]]))
elif mode == 'AFTER':
current_cell = cells[self.window.active_group()]
relevant_indx.update(set([current_cell[max2]]))
elif mode == 'NEAREST':
current_cell = cells[self.window.active_group()]
relevant_indx.update(set([current_cell[min2], current_cell[max2]]))
elif mode == 'RELEVANT':
current_cell = cells[self.window.active_group()]
min_val1 = current_cell[min1]
max_val1 = current_cell[max1]
for c in cells:
min_val2 = c[min1]
max_val2 = c[max1]
if min_val1 >= max_val2 or min_val2 >= max_val1:
continue
relevant_indx.update(set([c[min2], c[max2]]))
elif mode == 'ALL':
relevant_indx.update(set(range(len(data))))
        relevant_indx.difference_update(set([0, len(data)-1])) # don't show the first and last value (it's always 0 and 1)
relevant_indx = sorted(relevant_indx)
text = ', '.join([str(data[i]) for i in relevant_indx])
on_done = partial(self._on_resize_panes, orientation, cells, relevant_indx, data)
on_update = partial(self._on_resize_panes_update, orientation, cells, relevant_indx, data)
        on_cancel = partial(self._on_resize_panes, orientation, cells, relevant_indx, data, text)
        view = self.window.show_input_panel(orientation, text, on_done, on_update, on_cancel)
view.sel().clear()
view.sel().add(sublime.Region(0,view.size()))
def _on_resize_panes_layout(self, orientation, cells, relevant_indx, orig_data, text):
window = self.window
rows, cols, _ = self.layout()
input_data = [float(x) for x in text.split(',')]
if any(d > 1.0 for d in input_data):
return {'cols': cols, 'rows': rows, 'cells': cells}
cells = copy.deepcopy(cells)
data = copy.deepcopy(orig_data)
for i, d in zip(relevant_indx, input_data):
data[i] = d
data = list(enumerate(data))
data = sorted(data, key=lambda x: x[1]) # sort such that you can swap grid lines
indxes, data = map(list, zip(*data)) # indexes are also sorted
        relevant_cell_entries = []
        if orientation == 'cols':
            relevant_cell_entries = [XMIN,XMAX]
        elif orientation == 'rows':
            relevant_cell_entries = [YMIN,YMAX]
        # change the cell boundaries according to the sorted indexes
        transformations = [(old, new) for new, old in enumerate(indxes) if new != old]
        for i in range(len(cells)):
            for j in relevant_cell_entries:
for old, new in transformations:
if cells[i][j] == old:
cells[i][j] = new
break
if orientation == 'cols':
if len(cols) == len(data):
cols = data
elif orientation == 'rows':
if len(rows) == len(data):
rows = data
return {'cols': cols, 'rows': rows, 'cells': cells}
def _on_resize_panes_update(self, orientation, cells, relevant_indx, orig_data, text):
layout = self._on_resize_panes_layout(orientation, cells, relevant_indx, orig_data, text)
fixed_set_layout_no_focus_change(self.window, layout)
def _on_resize_panes(self, orientation, cells, relevant_indx, orig_data, text):
layout = self._on_resize_panes_layout(orientation, cells, relevant_indx, orig_data, text)
fixed_set_layout(self.window, layout)
def zoom_pane(self, fraction, skip_saving):
window = self.window
active_group = window.active_group()
settings = window.settings()
origami_fraction = settings.get( 'origami_fraction' )
original_panes_layout = settings.get( 'original_panes_layout' )
if not skip_saving and ( origami_fraction or original_panes_layout ):
print('Origami Error: Trying to zoom a zoomed pane!')
unmaximize_pane( window )
return
if fraction is None:
fraction = .8
fraction = min(1, max(0, fraction))
rows,cols,cells = self.layout()
current_cell = cells[active_group]
current_col = current_cell[0]
num_cols = len(cols)-1
if not skip_saving:
settings.set( 'original_panes_layout', window.layout() )
#TODO: the sizes of the unzoomed panes are calculated incorrectly if the
# unzoomed panes have a split that overlaps the zoomed pane.
current_col_width = 1 if num_cols==1 else fraction
other_col_width = 0 if num_cols==1 else (1-current_col_width)/(num_cols-1)
cols = [0.0]
for i in range(0,num_cols):
cols.append(cols[i] + (current_col_width if i == current_col else other_col_width))
current_row = current_cell[1]
num_rows = len(rows)-1
current_row_height = 1 if num_rows==1 else fraction
other_row_height = 0 if num_rows==1 else (1-current_row_height)/(num_rows-1)
rows = [0.0]
for i in range(0,num_rows):
rows.append(rows[i] + (current_row_height if i == current_row else other_row_height))
layout = {'cols': cols, 'rows': rows, 'cells': cells}
fixed_set_layout(window, layout)
settings.set( 'origami_fraction', fraction )
settings.set( 'max_pane_maximized', None )
settings.set( 'maximized_pane_group', window.active_group() )
def unzoom_pane(self):
window = self.window
active_group = window.active_group()
layout = self.settings().get('original_panes_layout')
remember_panes_layout = self.settings().get('remember_panes_layout')
window.settings().set( 'origami_fraction', None )
window.settings().set( 'original_panes_layout', None )
window.settings().set( 'max_pane_maximized', False )
if not ( remember_panes_layout and layout ):
rows,cols,cells = self.layout()
current_cell = cells[active_group]
num_cols = len(cols)-1
col_width = 1.0/num_cols
cols = [0.0]
for i in range(0,num_cols):
cols.append(cols[i] + col_width)
num_rows = len(rows)-1
row_height = 1.0/num_rows
rows = [0.0]
for i in range(0,num_rows):
rows.append(rows[i] + row_height)
layout = {'cols': cols, 'rows': rows, 'cells': cells}
fixed_set_layout(window, layout)
def has_zoom(self):
return is_pane_zoomed( self.window )
def create_pane(self, direction, give_focus=False):
has_zoom = self.has_zoom()
fraction = self.window.settings().get( 'origami_fraction' )
        # zooming forces focus to follow the new pane (equivalent to the
        # original `give_focus or (has_zoom and not give_focus)`)
        give_focus = give_focus or has_zoom
run_unzoomed( self, lambda: self._create_pane( direction, give_focus, has_zoom, fraction ) )
def _create_pane(self, direction, give_focus, has_zoom, fraction):
window = self.window
rows, cols, cells = self.layout()
active_group = window.active_group()
old_cell = cells.pop(active_group)
new_cell = []
if direction in ('up', 'down'):
cells = push_down_cells_after(cells, old_cell[YMAX])
rows.insert(old_cell[YMAX], (rows[old_cell[YMIN]] + rows[old_cell[YMAX]]) / 2)
new_cell = [old_cell[XMIN], old_cell[YMAX], old_cell[XMAX], old_cell[YMAX]+1]
old_cell = [old_cell[XMIN], old_cell[YMIN], old_cell[XMAX], old_cell[YMAX]]
elif direction in ('right', 'left'):
cells = push_right_cells_after(cells, old_cell[XMAX])
cols.insert(old_cell[XMAX], (cols[old_cell[XMIN]] + cols[old_cell[XMAX]]) / 2)
new_cell = [old_cell[XMAX], old_cell[YMIN], old_cell[XMAX]+1, old_cell[YMAX]]
old_cell = [old_cell[XMIN], old_cell[YMIN], old_cell[XMAX], old_cell[YMAX]]
if new_cell:
if direction in ('left', 'up'):
focused_cell = new_cell
unfocused_cell = old_cell
else:
focused_cell = old_cell
unfocused_cell = new_cell
cells.insert(active_group, focused_cell)
cells.append(unfocused_cell)
layout = {'cols': cols, 'rows': rows, 'cells': cells}
fixed_set_layout(window, layout)
if give_focus:
self.travel_to_pane(direction)
if has_zoom and not self.settings().get('unzoom_after_creating_pane', False):
maximize_pane( window, fraction )
def _destroy_current_pane(self, has_zoom, fraction):
#Out of the four adjacent panes, one was split to create this pane.
#Find out which one, move to it, then destroy this pane.
cells = self.get_cells()
current = cells[self.window.active_group()]
choices = {}
choices['up'] = self.adjacent_cell('up')
choices['right'] = self.adjacent_cell('right')
choices['down'] = self.adjacent_cell('down')
choices['left'] = self.adjacent_cell('left')
target_dir = None
for dir,c in choices.items():
if not c:
continue
if dir in ['up', 'down']:
if c[XMIN] == current[XMIN] and c[XMAX] == current[XMAX]:
target_dir = dir
elif dir in ['left', 'right']:
if c[YMIN] == current[YMIN] and c[YMAX] == current[YMAX]:
target_dir = dir
if target_dir:
self.travel_to_pane(target_dir)
self._destroy_pane( opposite_direction( target_dir ), has_zoom, fraction )
def destroy_pane(self, direction):
has_zoom = self.has_zoom()
fraction = self.window.settings().get( 'origami_fraction' )
run_unzoomed( self, lambda: self._destroy_pane( direction, has_zoom, fraction ) )
def _destroy_pane(self, direction, has_zoom, fraction):
if direction == 'self':
self._destroy_current_pane( has_zoom, fraction )
return
window = self.window
rows, cols, cells = self.layout()
active_group = window.active_group()
cell_to_remove = None
current_cell = cells[active_group]
adjacent_cells = cells_adjacent_to_cell_in_direction(cells, current_cell, direction)
if len(adjacent_cells) == 1:
cell_to_remove = adjacent_cells[0]
if cell_to_remove:
active_view = window.active_view()
group_to_remove = cells.index(cell_to_remove)
# dupe_views = self.duplicated_views(active_group, group_to_remove)
dupe_views = self.tabless_views(window, group_to_remove)
# print('destroy_pane dupe_views', dupe_views)
for view_to_remove in dupe_views:
window.focus_view(view_to_remove)
window.run_command('close')
if active_view:
window.focus_view(active_view)
cells.remove(cell_to_remove)
if direction == 'up':
rows.pop(cell_to_remove[YMAX])
adjacent_cells = cells_adjacent_to_cell_in_direction(cells, cell_to_remove, 'down')
for cell in adjacent_cells:
cells[cells.index(cell)][YMIN] = cell_to_remove[YMIN]
cells = pull_up_cells_after(cells, cell_to_remove[YMAX])
elif direction == 'right':
cols.pop(cell_to_remove[XMIN])
adjacent_cells = cells_adjacent_to_cell_in_direction(cells, cell_to_remove, 'left')
for cell in adjacent_cells:
cells[cells.index(cell)][XMAX] = cell_to_remove[XMAX]
cells = pull_left_cells_after(cells, cell_to_remove[XMIN])
elif direction == 'down':
rows.pop(cell_to_remove[YMIN])
adjacent_cells = cells_adjacent_to_cell_in_direction(cells, cell_to_remove, 'up')
for cell in adjacent_cells:
cells[cells.index(cell)][YMAX] = cell_to_remove[YMAX]
cells = pull_up_cells_after(cells, cell_to_remove[YMIN])
elif direction == 'left':
cols.pop(cell_to_remove[XMAX])
adjacent_cells = cells_adjacent_to_cell_in_direction(cells, cell_to_remove, 'right')
for cell in adjacent_cells:
cells[cells.index(cell)][XMIN] = cell_to_remove[XMIN]
cells = pull_left_cells_after(cells, cell_to_remove[XMAX])
layout = {'cols': cols, 'rows': rows, 'cells': cells}
fixed_set_layout(window, layout)
if has_zoom and not self.settings().get('unzoom_after_closing_pane', False):
maximize_pane( window, fraction )
def pull_file_from_pane(self, direction):
adjacent_cell = self.adjacent_cell(direction)
if adjacent_cell:
cells = self.get_cells()
group_index = cells.index(adjacent_cell)
view = self.window.active_view_in_group(group_index)
if view:
active_group_index = self.window.active_group()
views_in_group = self.window.views_in_group(active_group_index)
self.window.set_view_index(view, active_group_index, len(views_in_group))
class TravelToPaneCommand(PaneCommand):
def run(self, direction, create_new_if_necessary=None):
if create_new_if_necessary is None:
create_new_if_necessary = self.settings().get('create_new_pane_if_necessary')
self.travel_to_pane(direction, create_new_if_necessary)
class CarryFileToPaneCommand(PaneCommand):
def run(self, direction, create_new_if_necessary=None):
if create_new_if_necessary is None:
create_new_if_necessary = self.settings().get('create_new_pane_if_necessary')
self.carry_file_to_pane(direction, create_new_if_necessary)
class CloneFileToPaneCommand(PaneCommand):
def run(self, direction, create_new_if_necessary=None):
if create_new_if_necessary is None:
create_new_if_necessary = self.settings().get('create_new_pane_if_necessary')
self.clone_file_to_pane(direction, create_new_if_necessary)
class CreatePaneWithFileCommand(PaneCommand):
def run(self, direction):
self.create_pane(direction)
self.carry_file_to_pane(direction)
class CreatePaneWithClonedFileCommand(PaneCommand):
def run(self, direction):
self.create_pane(direction)
self.clone_file_to_pane(direction)
class PullFileFromPaneCommand(PaneCommand):
def run(self, direction):
self.pull_file_from_pane(direction)
class ZoomPaneCommand(PaneCommand):
def run(self, fraction=None, skip_saving=False):
self.zoom_pane(fraction, skip_saving)
class UnzoomPaneCommand(PaneCommand):
def run(self):
self.unzoom_pane()
class ToggleZoomPaneCommand(sublime_plugin.WindowCommand):
def run(self, fraction=None):
window = self.window
settings = window.settings()
max_pane_maximized = settings.get( 'max_pane_maximized' )
origami_fraction = settings.get( 'origami_fraction' )
original_panes_layout = settings.get( 'original_panes_layout' )
# print( 'max_pane max_pane_maximized %-5s, origami_fraction: %-5s, original_panes_layout, %-5s' % ( max_pane_maximized, origami_fraction, original_panes_layout is not None ) )
if is_pane_zoomed( window ):
if origami_fraction:
window.run_command( 'unzoom_pane' )
else:
if max_pane_maximized:
window.run_command( 'zoom_pane', { 'fraction': fraction, 'skip_saving': True } )
else:
print( "Origami Error: Invalid zooming state!" )
window.run_command( 'unmaximize_pane' )
else:
num_groups = window.num_groups()
if num_groups > 1:
window.run_command( 'zoom_pane', { 'fraction': fraction } )
else:
print( "Origami Error: Cannot zoom a window only with '%s' panes!" % num_groups )
class CreatePaneCommand(PaneCommand):
def run(self, direction, give_focus=False):
self.create_pane(direction, give_focus)
class DestroyPaneCommand(PaneCommand):
def run(self, direction):
self.destroy_pane(direction)
class ResizePaneCommand(PaneCommand):
def run(self, orientation, mode = None):
        if mode is None:
mode = 'NEAREST'
self.resize_panes(orientation, mode)
class ReorderPaneCommand(PaneCommand):
def run(self):
self.reorder_panes()
class SaveLayoutCommand(PaneCommand):
""" Save the current layout configuration in a settings file. """
def __init__(self, window):
self.window = window
super(SaveLayoutCommand, self).__init__(window)
def on_done(self, nickname):
saved_layouts = self.settings().get('saved_layouts')
layout_names = [l['nickname'] for l in saved_layouts]
layout_data = self.layout()
if nickname in layout_names:
dialog_str = ("You already have a layout stored as '{0}'.\n\n"
"Do you want to continue and overwrite that "
"layout?".format(nickname))
dialog_btn = "Overwrite layout"
if sublime.ok_cancel_dialog(dialog_str, dialog_btn):
def get_index(seq, attr, value):
return next(i for (i, d) in enumerate(seq) if d[attr] == value)
layout = saved_layouts[get_index(saved_layouts, 'nickname', nickname)]
layout['rows'] = layout_data[0]
layout['cols'] = layout_data[1]
layout['cells'] = layout_data[2]
else:
self.window.run_command('save_layout')
return
else:
layout = {}
layout['nickname'] = nickname
layout['rows'] = layout_data[0]
layout['cols'] = layout_data[1]
layout['cells'] = layout_data[2]
saved_layouts.append(layout)
self.settings().set('saved_layouts', saved_layouts)
sublime.save_settings('Origami.sublime-settings')
def run(self):
self.window.show_input_panel(
'Window layout nickname:',
'',
self.on_done,
None,
None
)
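# Illustrative only: each entry appended to 'saved_layouts' above has this shape (the
# actual rows/cols/cells values come from the window's current layout; the ones below
# describe a plain two-column layout):
#
#     {
#         "nickname": "two columns",
#         "rows": [0.0, 1.0],
#         "cols": [0.0, 0.5, 1.0],
#         "cells": [[0, 0, 1, 1], [1, 0, 2, 1]]
#     }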
class RestoreLayoutCommand(PaneCommand):
""" Restore a saved layout from a settings file. """
def __init__(self, window):
self.window = window
super(RestoreLayoutCommand, self).__init__(window)
def on_done(self, index):
saved_layouts = self.settings().get('saved_layouts')
if index != -1:
selected_layout = saved_layouts[index]
layout = {}
layout['cells'] = selected_layout['cells']
layout['cols'] = selected_layout['cols']
layout['rows'] = selected_layout['rows']
fixed_set_layout(self.window, layout)
def run(self):
if self.settings().has('saved_layouts'):
saved_layouts = self.settings().get('saved_layouts')
layout_names = [l['nickname'] for l in saved_layouts]
self.window.show_quick_panel(layout_names, self.on_done)
class RemoveLayoutCommand(PaneCommand):
""" Remove a previously saved layout from your settings file. """
def __init__(self, window):
self.window = window
super(RemoveLayoutCommand, self).__init__(window)
def on_done(self, index):
saved_layouts = self.settings().get('saved_layouts')
if index != -1:
saved_layouts.pop(index)
self.settings().set('saved_layouts', saved_layouts)
sublime.save_settings('Origami.sublime-settings')
def run(self):
if self.settings().has('saved_layouts'):
saved_layouts = self.settings().get('saved_layouts')
layout_names = [l['nickname'] for l in saved_layouts]
self.window.show_quick_panel(layout_names, self.on_done)
class NewWindowFromSavedLayoutCommand(PaneCommand):
""" Brings up a list of saved views and allows the user
to create a new window using that layout. """
def __init__(self, window):
self.window = window
super(NewWindowFromSavedLayoutCommand, self).__init__(window)
def on_done(self, index):
saved_layouts = self.settings().get('saved_layouts')
if index != -1:
selected_layout = saved_layouts[index]
layout = {}
layout['cells'] = selected_layout['cells']
layout['cols'] = selected_layout['cols']
layout['rows'] = selected_layout['rows']
self.window.run_command('new_window')
new_window = sublime.active_window()
fixed_set_layout(new_window, layout)
def run(self):
if self.settings().has('saved_layouts'):
saved_layouts = self.settings().get('saved_layouts')
layout_names = [l['nickname'] for l in saved_layouts]
self.window.show_quick_panel(layout_names, self.on_done)
class NewWindowWithCurrentLayoutCommand(PaneCommand):
""" Opens a new window using the current layout settings. """
def __init__(self, window):
self.window = window
super(NewWindowWithCurrentLayoutCommand, self).__init__(window)
def run(self):
layout = self.window.layout()
self.window.run_command('new_window')
new_window = sublime.active_window()
fixed_set_layout(new_window, layout)
class AutoCloseEmptyPanes(sublime_plugin.EventListener, WithSettings):
def is_tabless_view(self, view):
""" When you make a new pane, it comes with a tabless view that gets a tab when you type
into it. You also get a similar view when using the command palette to open a file.
If we think it's this kind of view, return True."""
if sublime.version()[0] == '2':
return False
else:
window = view.window() or sublime.active_window()
if window and window.get_view_index(view)[1] == -1:
return True
return False
def on_close(self, view):
if sublime.version()[0] == '2':
self.on_pre_close(view)
def on_pre_close(self, view):
# Read from global settings for backward compatibility
auto_close = view.settings().get('origami_auto_close_empty_panes', False)
auto_close = self.settings().get('auto_close_empty_panes', auto_close)
if self.is_tabless_view(view):
# We don't want to close the pane when closing a transient view
return
window = sublime.active_window()
active_group = window.active_group()
# We're in pre_close, so use set_timeout to close the group right after this.
if len(window.views_in_group(active_group)) < 2:
if auto_close:
sublime.set_timeout( lambda: window.run_command('destroy_pane', {'direction':'self'}), 100 )
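# Settings sketch (illustrative): either key below enables the auto-close behaviour above;
# the first lives in Origami.sublime-settings, the second is the legacy global/per-view key:
#
#     { "auto_close_empty_panes": true }
#     { "origami_auto_close_empty_panes": true }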
class AutoZoomOnFocus(sublime_plugin.EventListener, WithSettings):
running = False
active_group = -1
def delayed_zoom(self, view, fraction):
# zoom_pane hangs sublime if you destroy the pane above or to your left.
# call it in a sublime.set_timeout to fix the issue
# Sublime Text 2 has issues on startup where views don't have windows yet.
# If we don't have a window yet, bail.
if view.window() is None:
self.running = False
return
args = {}
# Work correctly if someone sets 'origami_auto_zoom_on_focus': true rather
# than e.g. 'origami_auto_zoom_on_focus': .8.
if fraction is not True:
args['fraction'] = fraction
view.window().run_command('zoom_pane', args)
self.running = False
def on_activated(self, view):
if self.running:
return
# Read from global settings for backward compatibility
fraction = view.settings().get('origami_auto_zoom_on_focus', False)
fraction = self.settings().get('auto_zoom_on_focus', fraction)
if not fraction:
return
if view.settings().get('is_widget'):
return
new_active_group = view.window().active_group()
if new_active_group == self.active_group:
return
self.active_group = new_active_group
self.running = True
sublime.set_timeout(lambda: self.delayed_zoom(view, fraction), 0)
class OrigamiMoveToGroupCommand(PaneCommand):
def run(self, group):
window = self.window
def move():
time.sleep(0.1)
# print('running move')
window.run_command( 'move_to_group', { 'group': group } )
threading.Thread(target=focus).start()
def focus():
time.sleep(0.2)
# print('running focus')
window.run_command( 'focus_group', { 'group': group } )
threading.Thread(target=move).start()
|
tester.py | import torch
from torch.autograd import Variable
import sys
import time
from datetime import datetime
import numpy as np
import scipy as sp
import scipy.linalg as linalg
import torch.multiprocessing
import matplotlib.pyplot as plt
def test_speed_inverse_gesv(ndim=10):
A = torch.randn(ndim, ndim)
A = A.mm(A.t())
eig, _ = torch.symeig(A)
print(torch.min(eig))
matrix = Variable(A + 1e0 * torch.eye(ndim), requires_grad=True)
n_rep = 10000
inv1 = None
start_time = time.time()
for _ in range(n_rep):
inv1, _ = torch.gesv(torch.diag(Variable(matrix.data.new(matrix.size(0)).fill_(1))), matrix)
print(time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time)))
inv2 = None
start_time = time.time()
for _ in range(n_rep):
inv2 = matrix.inverse()
print(time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time)))
print(torch.sum((inv1.data - inv2.data)**2))
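# Note: torch.gesv, torch.symeig and Variable used in these benchmarks come from
# pre-1.0 PyTorch and have since been removed. A rough modern equivalent of the
# solve-vs-inverse comparison above (a sketch, not a drop-in replacement) would be:
def test_speed_inverse_solve_modern(ndim=10):
    A = torch.randn(ndim, ndim)
    A = A.mm(A.t()) + torch.eye(ndim)              # symmetric positive definite
    inv1 = torch.linalg.solve(A, torch.eye(ndim))  # solve A X = I
    inv2 = torch.linalg.inv(A)                     # explicit inverse
    print(torch.sum((inv1 - inv2) ** 2))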
def test_speed_scipy_torch(ndim=10):
A = torch.randn(ndim, ndim)
A = A.mm(A.t())
eig, _ = torch.symeig(A)
print(torch.min(eig))
matrix = Variable(A + 1e0 * torch.eye(ndim), requires_grad=True)
n_rep = 10000
start_time = time.time()
for _ in range(n_rep):
_, lu = torch.gesv(Variable(matrix.data.new(ndim).fill_(1)).view(-1), matrix)
print(time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time)))
print(torch.prod(torch.diag(lu)))
start_time = time.time()
for _ in range(n_rep):
_, _, u = linalg.lu(matrix.data.numpy())
print(time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time)))
print(np.prod(np.diag(u)))
def test_speed_gesv_potrf(ndim=10):
A = torch.randn(ndim, ndim)
A = A.mm(A.t())
A += 1e-6 * torch.eye(ndim)
eig, _ = torch.symeig(A)
if torch.min(eig) < 0:
A += (1e-6 - eig) * torch.eye(ndim)
eig, _ = torch.symeig(A)
print(torch.min(eig))
print(torch.sum(torch.log(eig)))
matrix = A
n_rep = 100
start_time = time.time()
for _ in range(n_rep):
_, lu = torch.gesv(matrix.new(ndim).fill_(1).view(-1), matrix)
result = torch.sum(torch.log(torch.diag(lu)))
print('GESV')
print(time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time)))
print(result)
diag = torch.diag(lu)
print(torch.sum(torch.log(diag[diag > 0])))
print(torch.min(torch.diag(lu)), torch.max(torch.diag(lu)))
start_time = time.time()
for _ in range(n_rep):
chol_from_upper = torch.potrf(matrix, True)
chol_from_lower = torch.potrf(matrix, False)
result = (torch.sum(torch.log(torch.diag(chol_from_upper)), 0, keepdim=True) + torch.sum(torch.log(torch.diag(chol_from_lower)), 0, keepdim=True)).view(1, 1)
print('POTRF')
print(time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time)))
print(result)
print(torch.min(torch.diag(chol_from_upper)), torch.max(torch.diag(chol_from_upper)))
print(torch.min(torch.diag(chol_from_lower)), torch.max(torch.diag(chol_from_lower)))
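# Both timings above compute log|A| for a symmetric positive definite matrix: from the
# LU factorization, log|A| = sum(log(diag(U))) up to the sign of the permutation, and
# from a Cholesky factor L with A = L L^T, log|A| = 2 * sum(log(diag(L))). Summing the
# log-diagonals of the upper and lower Cholesky factors, as done above, yields the same
# 2 * sum(log(diag(L))) because both factors share the same diagonal.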
def object_comparison():
from HyperSphere.GP.models.gp_regression import GPRegression
from HyperSphere.GP.kernels.modules.matern52 import Matern52
from HyperSphere.GP.kernels.modules.squared_exponential import SquaredExponentialKernel
from HyperSphere.BO.shadow_inference.inference_slide_both import ShadowInference as si1
from HyperSphere.BO.shadow_inference.inference_slide_origin import ShadowInference as si2
from HyperSphere.BO.shadow_inference.inference_slide_origin import ShadowInference as si3
model1 = GPRegression(Matern52(3))
model2 = GPRegression(SquaredExponentialKernel(3))
a = si1((Variable(torch.randn(10, 3)), Variable(torch.randn(10, 3))), model1)
b = a.__class__((a.train_x, a.train_y), model1)
print(a.__class__ is b.__class__)
print(a.__class__)
print(b.__class__)
def kumaraswamy():
import matplotlib.pyplot as plt
n_value = 10
a_pool = np.linspace(0.5, 5, n_value)
b_pool = np.linspace(0.5, 5, n_value)
x = np.linspace(0, 1, 100)
for i in range(n_value):
# y = 1 - (1 - x ** 1.0) ** b_pool[i]
# label = '%.4f' % b_pool[i]
y = 1 - (1 - x ** a_pool[i]) ** 2.0
label = '%.4f' % a_pool[i]
plt.plot(x, y, label=label)
plt.legend()
plt.show()
def test_func(i):
p = torch.multiprocessing.current_process()
print(p.pid)
sys.stdout.flush()
ndim = 500
A = Variable(torch.randn(ndim, ndim))
b = Variable(torch.randn(ndim, 4))
x = torch.gesv(b, A)[0]
return x
def multiprocessor_test():
n = 5
pool = torch.multiprocessing.Pool(n)
result_list = []
for i in range(n):
time.sleep(0.1)
result_list.append(pool.apply_async(test_func, args=(i,)))
print('At %s, running %d processes' % (datetime.now().strftime('%Y%m%d-%H:%M:%S:%f'), [p.ready() for p in result_list].count(False)))
sys.stdout.flush()
print("all processes are running")
sys.stdout.flush()
while [p.ready() for p in result_list].count(False) > 0:
time.sleep(0.1)
print('At %s, running %d processes' % (datetime.now().strftime('%Y%m%d-%H:%M:%S:%f'), [p.ready() for p in result_list].count(False)))
result = [p.get() for p in result_list]
for res in result:
print(res.size())
# process_list = [torch.multiprocessing.Process(target=test_func, args=(i, )) for i in range(n)]
# print("Before start n_running : %d" % [p.is_alive() for p in process_list].count(True))
# sys.stdout.flush()
# for p in process_list:
# p.start()
# while [p.is_alive() for p in process_list].count(True) > 0:
# time.sleep(0.1)
# print(datetime.now().strftime('%Y%m%d-%H:%M:%S:%f'))
# sys.stdout.flush()
print('Done')
def inversion_time(n_data):
start_time = time.time()
A = sp.randn(n_data, n_data)
print('random generation' + time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time)))
start_time = time.time()
B = linalg.inv(A)
print('matrix inversion' + time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time)))
def centering_check():
x = np.linspace(0, 1, 100)
zeros_list = []
zeros = []
for s in x:
poly_zeros = np.roots([-0.25, 0, 0.75, 0.5 - s])
zeros_list.append(poly_zeros)
assert np.sum(np.imag(poly_zeros)) == 0
smallest_abs = np.argmin(np.abs(poly_zeros))
zeros.append(poly_zeros[smallest_abs])
print(zeros_list)
plt.plot(x, zeros)
plt.show()
def inverse_with_center(center_probability=0.5):
if isinstance(center_probability, (float, int)):
center_probability = torch.zeros(54) + center_probability
shift = []
for i in range(center_probability.numel()):
poly_d = center_probability.squeeze()[i]
if poly_d == 0:
shift.append(-1.0)
elif poly_d == 1:
shift.append(1.0)
elif 0 < poly_d < 1:
poly_zeros = np.roots([-0.25, 0, 0.75, 0.5 - poly_d])
shift.append(poly_zeros[np.argmin(np.abs(poly_zeros))])
shift = torch.FloatTensor(shift)
target = torch.linspace(0, 0.5, 55)[1:]
target_np = target.numpy()
zero_list = []
for i in range(54):
zeros = np.roots([2, -3, 0, target_np[i]])
zero = zeros[np.logical_and(zeros >= 0, zeros <= 1)][0] * 2.0 - 1.0 - shift[i]
zero_list.append(zero)
zero_list[-1] = 0
print(zero_list)
if __name__ == '__main__':
inverse_with_center()
|
video_downloade.py | import sys
import youtube_dl
from threading import Thread
from PyQt5.QtWidgets import (QApplication, QWidget,
QPushButton, QLabel, QLineEdit, QComboBox)
class MyLogger(object):
def __init__(self, msgLabel):
self._msgLabel = msgLabel
def debug(self, msg):
msg = msg.replace('\n', '')
self._msgLabel.setText(msg)
self._msgLabel.adjustSize()
def warning(self, msg):
msg = msg.replace('\n', '')
self._msgLabel.setText(msg)
self._msgLabel.adjustSize()
def error(self, msg):
msg = msg.replace('\n', '')
self._msgLabel.setText(msg)
self._msgLabel.adjustSize()
class Downloader:
@staticmethod
def download(url, port, model, msgLabel):
if url == '':
return
opt = {'outtmpl': '%(title)s.%(ext)s',
'logger':MyLogger(msgLabel)}
if model == 'Balanced':
opt['format'] = 'best[height<=?480]'
elif model == 'Quality first':
opt['format'] = 'best'
elif model == 'Speed first':
opt['format'] = 'worst'
if port != '':
opt['proxy'] = '127.0.0.1:' + port
ydl_opts = opt
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
video = ydl.download([url])
class VideoDownLoadUi(QWidget):
def __init__(self):
super().__init__()
self._port = ''
self._url = ''
self._model = 'Balanced'
self.initUI()
def initUI(self):
self.setWindowTitle('Video Download')
self.setFixedSize(600, 190)
startBtn = QPushButton('Download', self)
startBtn.resize(startBtn.sizeHint())
startBtn.move(450, 73)
startBtn.clicked.connect(self.startClicked)
lbl1 = QLabel('Video URL', self)
lbl1.move(10, 40)
lbl2 = QLabel('Local proxy port', self)
lbl2.move(10, 80)
lbl3 = QLabel('Mode', self)
lbl3.move(250, 80)
msgLabel = QLabel('Idle', self)
msgLabel.move(10, 120)
self._msgLabel = msgLabel
urlEdit = QLineEdit(self)
urlEdit.textChanged[str].connect(self.urlChanged)
urlEdit.move(100, 38)
urlEdit.setFixedSize(490, 22)
portEdit = QLineEdit(self)
portEdit.textChanged[str].connect(self.portChanged)
portEdit.move(120, 78)
portEdit.setFixedSize(100, 22)
combo = QComboBox(self)
combo.addItem("画质优先")
combo.addItem("均衡")
combo.addItem("速度优先")
combo.setCurrentIndex(1)
combo.move(300, 78)
combo.activated[str].connect(self.onActivated)
self.show()
def startClicked(self):
Thread(target=Downloader.download, args=(self._url, self._port, self._model, self._msgLabel)).start()
def portChanged(self, text):
self._port = text
def urlChanged(self, text):
self._url = text
def onActivated(self, text):
self._model = text
app = QApplication(sys.argv)
ex = VideoDownLoadUi()
sys.exit(app.exec_()) |
acs_client.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import socket
import threading
from time import sleep
from os.path import expanduser, join, isfile
import os
import paramiko
import paramiko.agent
from sshtunnel import SSHTunnelForwarder
from scp import SCPClient
from knack.prompting import prompt_pass
from knack.util import CLIError
def _load_key(key_filename):
pkey = None
try:
pkey = paramiko.RSAKey.from_private_key_file(key_filename, None)
except paramiko.PasswordRequiredException:
key_pass = prompt_pass('Password for private key:')
pkey = paramiko.RSAKey.from_private_key_file(key_filename, key_pass)
if pkey is None:
raise CLIError('failed to load key: {}'.format(key_filename))
return pkey
def _load_keys(key_filename=None, allow_agent=True):
keys = []
default_key_path = join(expanduser("~"), '.ssh', 'id_rsa')
if key_filename is not None:
key = _load_key(key_filename)
keys.append(key)
if allow_agent:
agent = paramiko.agent.Agent()
for key in agent.get_keys():
keys.append(key)
if not keys and isfile(default_key_path):
key = _load_key(default_key_path)
keys.append(key)
if not keys:
raise CLIError('No keys available in ssh agent or no key in {}. '
'Do you need to add keys to your ssh agent via '
'ssh-add or specify a --ssh-key-file?'.format(default_key_path))
return keys
def secure_copy(user, host, src, dest, key_filename=None, allow_agent=True):
keys = _load_keys(key_filename, allow_agent)
pkey = keys[0]
ssh = paramiko.SSHClient()
proxy = None
ssh_config_file = os.path.expanduser("~/.ssh/config")
if os.path.isfile(ssh_config_file):
conf = paramiko.SSHConfig()
with open(ssh_config_file) as f:
conf.parse(f)
host_config = conf.lookup(host)
if 'proxycommand' in host_config:
proxy = paramiko.ProxyCommand(host_config['proxycommand'])
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=user, pkey=pkey, sock=proxy)
scp = SCPClient(ssh.get_transport())
scp.get(src, dest)
scp.close()
class ACSClient(object):
def __init__(self, client=None):
self.client = client
self.transport = None
self.tunnel_server = None
self.host = None
self.username = None
self.port = None
def __del__(self):
if self.transport is not None:
self.transport.close()
if self.client is not None:
self.client.close()
if self.tunnel_server is not None:
self.tunnel_server.close_tunnel()
def connect(self, host, username, port=2200,
key_filename=None):
"""
Creates a connection to the remote server.
:param host: Remote host
:type host: String
:param username: User name to connect to the remote host
:type username: String
:param port: Remote host port
:type port: Number
"""
if not host:
raise ValueError('Host is missing')
if not username:
raise ValueError('Username is missing')
if not port:
raise ValueError('Missing port')
self.host = host
self.username = username
self.port = port
if self.client is None:
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
pkey = None
if key_filename is not None:
pkey = _load_key(key_filename)
self.client.connect(
hostname=host,
port=port,
username=username,
pkey=pkey)
self.transport = self.client.get_transport()
return self.transport is not None
def run(self, command, background=False):
"""
Runs a command on the remote host
:param command: Command to run on the remote host
:type command: String
:param background: True to run it in a separate thread,
False should be run in the foreground
:type command: Boolean
"""
if background:
t = threading.Thread(target=ACSClient._run_cmd, args=(self, command))
t.daemon = True
t.start()
return None
return self._run_cmd(command)
def _run_cmd(self, command):
"""
Runs a command on the remote host
:param command: Command to run on the remote host
:type command: String
"""
if not command:
raise ValueError('Command is missing')
_, stdout, stderr = self.client.exec_command(command)
return stdout, stderr
def file_exists(self, file_path):
"""
Checks if file on the remote exists
:param file_path: Full path to the file on remote machine
:type file_path: String
"""
if not file_path:
raise ValueError('Missing file path')
if self.transport is None:
raise TypeError('Transport cannot be none')
sftp = self.transport.open_sftp_client()
result = None
try:
sftp.stat(file_path)
result = True
except IOError:
result = False
finally:
sftp.close()
return result
def create_tunnel(self, remote_host, remote_port, local_port=0):
"""
Creates a tunnel to the remote host
:param remote_host: Remote host to tunnel to
:type remote_host: String
:param remote_port: Remote port to tunnel to
:type remote_port: Number
:param local_port: Local port. If set to 0, random local port is selected
:type local_port: Number
"""
if local_port == 0:
local_port = self.get_available_local_port()
with SSHTunnelForwarder((self.host, self.port),
ssh_username=self.username,
remote_bind_address=(remote_host, remote_port),
local_bind_address=('0.0.0.0', local_port)):
try:
while True:
sleep(1)
except KeyboardInterrupt:
pass
@staticmethod
def get_available_local_port():
"""
Gets a random, available local port
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # pylint: disable=no-member
s.bind(('', 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
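# Minimal usage sketch (illustrative; the host, user and command are placeholders):
#
#     client = ACSClient()
#     if client.connect('mycluster.example.com', 'azureuser', port=2200):
#         stdout, stderr = client.run('docker ps')
#         print(stdout.read().decode())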
|
_asyncio.py | import array
import asyncio
import concurrent.futures
import math
import socket
import sys
from asyncio.base_events import _run_until_complete_cb # type: ignore
from collections import OrderedDict, deque
from concurrent.futures import Future
from dataclasses import dataclass
from functools import partial, wraps
from inspect import (
CORO_RUNNING, CORO_SUSPENDED, GEN_RUNNING, GEN_SUSPENDED, getcoroutinestate, getgeneratorstate)
from io import IOBase
from queue import Queue
from socket import AddressFamily, SocketKind, SocketType
from threading import Thread
from types import TracebackType
from typing import (
Any, Awaitable, Callable, Collection, Coroutine, Deque, Dict, Generator, List, Optional,
Sequence, Set, Tuple, Type, TypeVar, Union, cast)
from weakref import WeakKeyDictionary
from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc
from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable
from .._core._eventloop import claim_worker_thread, threadlocals
from .._core._exceptions import (
BrokenResourceError, BusyResourceError, ClosedResourceError, EndOfStream)
from .._core._exceptions import ExceptionGroup as BaseExceptionGroup
from .._core._exceptions import WouldBlock
from .._core._sockets import GetAddrInfoReturnType, convert_ipv6_sockaddr
from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter
from .._core._synchronization import Event as BaseEvent
from .._core._synchronization import ResourceGuard
from .._core._tasks import CancelScope as BaseCancelScope
from ..abc import IPSockAddrType, UDPPacketType
from ..lowlevel import RunVar
if sys.version_info >= (3, 8):
get_coro = asyncio.Task.get_coro
else:
def get_coro(task: asyncio.Task) -> Union[Coroutine, Generator]:
return task._coro
if sys.version_info >= (3, 7):
from asyncio import all_tasks, create_task, current_task, get_running_loop
from asyncio import run as native_run
def find_root_task() -> asyncio.Task:
for task in all_tasks():
if task._callbacks:
for cb, context in task._callbacks: # type: ignore
if cb is _run_until_complete_cb or cb.__module__ == 'uvloop.loop':
return task
raise RuntimeError('Cannot find root task for setting cleanup callback')
else:
_T = TypeVar('_T')
def native_run(main, *, debug=False):
# Snatched from Python 3.7
from asyncio import coroutines, events, tasks
def _cancel_all_tasks(loop):
to_cancel = all_tasks(loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel()
loop.run_until_complete(
tasks.gather(*to_cancel, loop=loop, return_exceptions=True))
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler({
'message': 'unhandled exception during asyncio.run() shutdown',
'exception': task.exception(),
'task': task,
})
if events._get_running_loop() is not None:
raise RuntimeError(
"asyncio.run() cannot be called from a running event loop")
if not coroutines.iscoroutine(main):
raise ValueError("a coroutine was expected, got {!r}".format(main))
loop = events.new_event_loop()
try:
events.set_event_loop(loop)
loop.set_debug(debug)
return loop.run_until_complete(main)
finally:
try:
_cancel_all_tasks(loop)
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
events.set_event_loop(None)
loop.close()
def create_task(coro: Union[Generator[Any, None, _T], Awaitable[_T]], *, # type: ignore
name: Optional[str] = None) -> asyncio.Task:
return get_running_loop().create_task(coro)
def get_running_loop() -> asyncio.AbstractEventLoop:
loop = asyncio._get_running_loop()
if loop is not None:
return loop
else:
raise RuntimeError('no running event loop')
def all_tasks(loop: Optional[asyncio.AbstractEventLoop] = None) -> Set[asyncio.Task]:
"""Return a set of all tasks for the loop."""
from asyncio import Task
if loop is None:
loop = get_running_loop()
return {t for t in Task.all_tasks(loop) if not t.done()}
def current_task(loop: Optional[asyncio.AbstractEventLoop] = None) -> Optional[asyncio.Task]:
if loop is None:
loop = get_running_loop()
return asyncio.Task.current_task(loop)
def find_root_task() -> asyncio.Task:
for task in all_tasks():
for cb in task._callbacks:
if cb is _run_until_complete_cb or cb.__module__ == 'uvloop.loop':
return task
raise RuntimeError('Cannot find root task for setting cleanup callback')
T_Retval = TypeVar('T_Retval')
# Check whether there is native support for task names in asyncio (3.8+)
_native_task_names = hasattr(asyncio.Task, 'get_name')
WORKER_THREAD_MAX_IDLE_TIME = 10 # seconds
def get_callable_name(func: Callable) -> str:
module = getattr(func, '__module__', None)
qualname = getattr(func, '__qualname__', None)
return '.'.join([x for x in (module, qualname) if x])
#
# Event loop
#
_run_vars = WeakKeyDictionary() # type: WeakKeyDictionary[asyncio.AbstractEventLoop, Any]
current_token = get_running_loop
def _task_started(task: asyncio.Task) -> bool:
"""Return ``True`` if the task has been started and has not finished."""
coro = get_coro(task)
try:
return getcoroutinestate(coro) in (CORO_RUNNING, CORO_SUSPENDED)
except AttributeError:
try:
return getgeneratorstate(coro) in (GEN_RUNNING, GEN_SUSPENDED)
except AttributeError:
# task coro is async_generator_asend https://bugs.python.org/issue37771
raise Exception(f"Cannot determine if task {task} has started or not")
def _maybe_set_event_loop_policy(policy: Optional[asyncio.AbstractEventLoopPolicy],
use_uvloop: bool) -> None:
# On CPython, use uvloop when possible if no other policy has been given and if not
# explicitly disabled
if policy is None and use_uvloop and sys.implementation.name == 'cpython':
try:
import uvloop
except ImportError:
pass
else:
# Test for missing shutdown_default_executor() (uvloop 0.14.0 and earlier)
if (not hasattr(asyncio.AbstractEventLoop, 'shutdown_default_executor')
or hasattr(uvloop.loop.Loop, 'shutdown_default_executor')):
policy = uvloop.EventLoopPolicy()
if policy is not None:
asyncio.set_event_loop_policy(policy)
def run(func: Callable[..., T_Retval], *args, debug: bool = False, use_uvloop: bool = True,
policy: Optional[asyncio.AbstractEventLoopPolicy] = None) -> T_Retval:
@wraps(func)
async def wrapper():
task = current_task()
task_state = TaskState(None, get_callable_name(func), None)
_task_states[task] = task_state
if _native_task_names:
task.set_name(task_state.name)
try:
return await func(*args)
finally:
del _task_states[task]
_maybe_set_event_loop_policy(policy, use_uvloop)
return native_run(wrapper(), debug=debug)
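# Usage sketch (illustrative; in practice this backend is reached through anyio.run()):
#
#     async def main():
#         await sleep(1)
#         return 42
#
#     result = run(main)  # wraps main() in a named task, then delegates to native_run()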
#
# Miscellaneous
#
sleep = asyncio.sleep
#
# Timeouts and cancellation
#
CancelledError = asyncio.CancelledError
class CancelScope(BaseCancelScope, DeprecatedAsyncContextManager):
def __new__(cls, *, deadline: float = math.inf, shield: bool = False):
return object.__new__(cls)
def __init__(self, deadline: float = math.inf, shield: bool = False):
self._deadline = deadline
self._shield = shield
self._parent_scope: Optional[CancelScope] = None
self._cancel_called = False
self._active = False
self._timeout_handle: Optional[asyncio.TimerHandle] = None
self._cancel_handle: Optional[asyncio.Handle] = None
self._tasks: Set[asyncio.Task] = set()
self._host_task: Optional[asyncio.Task] = None
self._timeout_expired = False
def __enter__(self):
if self._active:
raise RuntimeError(
"Each CancelScope may only be used for a single 'with' block"
)
self._host_task = current_task()
self._tasks.add(self._host_task)
try:
task_state = _task_states[self._host_task]
except KeyError:
task_name = self._host_task.get_name() if _native_task_names else None
task_state = TaskState(None, task_name, self)
_task_states[self._host_task] = task_state
else:
self._parent_scope = task_state.cancel_scope
task_state.cancel_scope = self
self._timeout()
self._active = True
return self
def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> Optional[bool]:
if not self._active:
raise RuntimeError('This cancel scope is not active')
if current_task() is not self._host_task:
raise RuntimeError('Attempted to exit cancel scope in a different task than it was '
'entered in')
assert self._host_task is not None
host_task_state = _task_states.get(self._host_task)
if host_task_state is None or host_task_state.cancel_scope is not self:
raise RuntimeError("Attempted to exit a cancel scope that isn't the current tasks's "
"current cancel scope")
self._active = False
if self._timeout_handle:
self._timeout_handle.cancel()
self._timeout_handle = None
self._tasks.remove(self._host_task)
host_task_state.cancel_scope = self._parent_scope
# Restart the cancellation effort in the nearest directly cancelled parent scope if this
# one was shielded
if self._shield:
scope = self._parent_scope
while scope is not None:
if scope._cancel_called and scope._cancel_handle is None:
scope._deliver_cancellation()
break
# No point in looking beyond any shielded scope
if scope._shield:
break
scope = scope._parent_scope
if exc_val is not None:
exceptions = exc_val.exceptions if isinstance(exc_val, ExceptionGroup) else [exc_val]
if all(isinstance(exc, CancelledError) for exc in exceptions):
if self._timeout_expired:
return True
elif not self._cancel_called:
# Task was cancelled natively
return None
elif not self._parent_cancelled():
# This scope was directly cancelled
return True
return None
def _timeout(self):
if self._deadline != math.inf:
loop = get_running_loop()
if loop.time() >= self._deadline:
self._timeout_expired = True
self.cancel()
else:
self._timeout_handle = loop.call_at(self._deadline, self._timeout)
def _deliver_cancellation(self) -> None:
"""
Deliver cancellation to directly contained tasks and nested cancel scopes.
Schedule another run at the end if we still have tasks eligible for cancellation.
"""
should_retry = False
cancellable_tasks: Set[asyncio.Task] = set()
current = current_task()
for task in self._tasks:
# The task is eligible for cancellation if it has started and is not in a cancel
# scope shielded from this one
cancel_scope = _task_states[task].cancel_scope
while cancel_scope is not self:
if cancel_scope is None or cancel_scope._shield:
break
else:
cancel_scope = cancel_scope._parent_scope
else:
should_retry = True
if task is not current and (task is self._host_task or _task_started(task)):
cancellable_tasks.add(task)
for task in cancellable_tasks:
task.cancel()
# Schedule another callback if there are still tasks left
if should_retry:
self._cancel_handle = get_running_loop().call_soon(self._deliver_cancellation)
else:
self._cancel_handle = None
def _parent_cancelled(self) -> bool:
# Check whether any parent has been cancelled
cancel_scope = self._parent_scope
while cancel_scope is not None and not cancel_scope._shield:
if cancel_scope._cancel_called:
return True
else:
cancel_scope = cancel_scope._parent_scope
return False
def cancel(self) -> DeprecatedAwaitable:
if not self._cancel_called:
if self._timeout_handle:
self._timeout_handle.cancel()
self._timeout_handle = None
self._cancel_called = True
self._deliver_cancellation()
return DeprecatedAwaitable(self.cancel)
@property
def deadline(self) -> float:
return self._deadline
@deadline.setter
def deadline(self, value: float) -> None:
self._deadline = float(value)
if self._timeout_handle is not None:
self._timeout_handle.cancel()
self._timeout_handle = None
if self._active and not self._cancel_called:
self._timeout()
@property
def cancel_called(self) -> bool:
return self._cancel_called
@property
def shield(self) -> bool:
return self._shield
async def checkpoint() -> None:
await sleep(0)
async def checkpoint_if_cancelled() -> None:
task = current_task()
if task is None:
return
try:
cancel_scope = _task_states[task].cancel_scope
except KeyError:
return
while cancel_scope:
if cancel_scope.cancel_called:
await sleep(0)
elif cancel_scope.shield:
break
else:
cancel_scope = cancel_scope._parent_scope
async def cancel_shielded_checkpoint() -> None:
with CancelScope(shield=True):
await sleep(0)
def current_effective_deadline():
try:
cancel_scope = _task_states[current_task()].cancel_scope
except KeyError:
return math.inf
deadline = math.inf
while cancel_scope:
deadline = min(deadline, cancel_scope.deadline)
if cancel_scope.shield:
break
else:
cancel_scope = cancel_scope._parent_scope
return deadline
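# Illustrative sketch of how CancelScope deadlines behave (application code would
# normally use anyio's fail_after/move_on_after helpers rather than CancelScope directly):
#
#     async def fetch_with_timeout():
#         with CancelScope(deadline=current_time() + 5) as scope:
#             await sleep(10)          # cancelled once the deadline expires
#         if scope.cancel_called:
#             print('timed out')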
def current_time():
return get_running_loop().time()
#
# Task states
#
class TaskState:
"""
Encapsulates auxiliary task information that cannot be added to the Task instance itself
because there are no guarantees about its implementation.
"""
__slots__ = 'parent_id', 'name', 'cancel_scope'
def __init__(self, parent_id: Optional[int], name: Optional[str],
cancel_scope: Optional[CancelScope]):
self.parent_id = parent_id
self.name = name
self.cancel_scope = cancel_scope
_task_states = WeakKeyDictionary() # type: WeakKeyDictionary[asyncio.Task, TaskState]
#
# Task groups
#
class ExceptionGroup(BaseExceptionGroup):
def __init__(self, exceptions: Sequence[BaseException]):
super().__init__()
self.exceptions = exceptions
class _AsyncioTaskStatus(abc.TaskStatus):
def __init__(self, future: asyncio.Future):
self._future = future
def started(self, value=None) -> None:
self._future.set_result(value)
class TaskGroup(abc.TaskGroup):
def __init__(self):
self.cancel_scope: CancelScope = CancelScope()
self._active = False
self._exceptions: List[BaseException] = []
async def __aenter__(self):
self.cancel_scope.__enter__()
self._active = True
return self
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> Optional[bool]:
ignore_exception = self.cancel_scope.__exit__(exc_type, exc_val, exc_tb)
if exc_val is not None:
self.cancel_scope.cancel()
self._exceptions.append(exc_val)
while self.cancel_scope._tasks:
try:
await asyncio.wait(self.cancel_scope._tasks)
except asyncio.CancelledError:
self.cancel_scope.cancel()
self._active = False
if not self.cancel_scope._parent_cancelled():
exceptions = self._filter_cancellation_errors(self._exceptions)
else:
exceptions = self._exceptions
try:
if len(exceptions) > 1:
raise ExceptionGroup(exceptions)
elif exceptions and exceptions[0] is not exc_val:
raise exceptions[0]
except BaseException as exc:
# Clear the context here, as it can only be done in-flight.
# If the context is not cleared, it can result in recursive tracebacks (see #145).
exc.__context__ = None
raise
return ignore_exception
@staticmethod
def _filter_cancellation_errors(exceptions: Sequence[BaseException]) -> List[BaseException]:
filtered_exceptions: List[BaseException] = []
for exc in exceptions:
if isinstance(exc, ExceptionGroup):
exc.exceptions = TaskGroup._filter_cancellation_errors(exc.exceptions)
if len(exc.exceptions) > 1:
filtered_exceptions.append(exc)
elif exc.exceptions:
filtered_exceptions.append(exc.exceptions[0])
elif not isinstance(exc, CancelledError):
filtered_exceptions.append(exc)
return filtered_exceptions
async def _run_wrapped_task(
self, coro: Coroutine, task_status_future: Optional[asyncio.Future]) -> None:
# This is the code path for Python 3.6 and 3.7 on which asyncio freaks out if a task raises
# a BaseException.
__traceback_hide__ = __tracebackhide__ = True # noqa: F841
task = cast(asyncio.Task, current_task())
try:
await coro
except BaseException as exc:
if task_status_future is None or task_status_future.done():
self._exceptions.append(exc)
self.cancel_scope.cancel()
else:
task_status_future.set_exception(exc)
else:
if task_status_future is not None and not task_status_future.done():
task_status_future.set_exception(
RuntimeError('Child exited without calling task_status.started()'))
finally:
if task in self.cancel_scope._tasks:
self.cancel_scope._tasks.remove(task)
del _task_states[task]
def _spawn(self, func: Callable[..., Coroutine], args: tuple, name,
task_status_future: Optional[asyncio.Future] = None) -> asyncio.Task:
def task_done(_task: asyncio.Task) -> None:
# This is the code path for Python 3.8+
assert _task in self.cancel_scope._tasks
self.cancel_scope._tasks.remove(_task)
del _task_states[_task]
try:
exc = _task.exception()
except CancelledError as e:
exc = e
if exc is not None:
if task_status_future is None or task_status_future.done():
self._exceptions.append(exc)
self.cancel_scope.cancel()
else:
task_status_future.set_exception(exc)
elif task_status_future is not None and not task_status_future.done():
task_status_future.set_exception(
RuntimeError('Child exited without calling task_status.started()'))
if not self._active:
raise RuntimeError('This task group is not active; no new tasks can be started.')
options = {}
name = name or get_callable_name(func)
if _native_task_names:
options['name'] = name
kwargs = {}
if task_status_future:
kwargs['task_status'] = _AsyncioTaskStatus(task_status_future)
coro = func(*args, **kwargs)
if not asyncio.iscoroutine(coro):
raise TypeError(f'Expected an async function, but {func} appears to be synchronous')
foreign_coro = not hasattr(coro, 'cr_frame') and not hasattr(coro, 'gi_frame')
if foreign_coro or sys.version_info < (3, 8):
coro = self._run_wrapped_task(coro, task_status_future)
task = create_task(coro, **options)
if not foreign_coro and sys.version_info >= (3, 8):
task.add_done_callback(task_done)
# Make the spawned task inherit the task group's cancel scope
_task_states[task] = TaskState(parent_id=id(current_task()), name=name,
cancel_scope=self.cancel_scope)
self.cancel_scope._tasks.add(task)
return task
def start_soon(self, func: Callable[..., Coroutine], *args, name=None) -> None:
self._spawn(func, args, name)
async def start(self, func: Callable[..., Coroutine], *args, name=None) -> None:
future: asyncio.Future = asyncio.Future()
task = self._spawn(func, args, name, future)
# If the task raises an exception after sending a start value without a switch point
# between, the task group is cancelled and this method never proceeds to process the
# completed future. That's why we have to have a shielded cancel scope here.
with CancelScope(shield=True):
try:
return await future
except CancelledError:
task.cancel()
raise
#
# Threads
#
_Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]]
def _thread_pool_worker(work_queue: Queue, loop: asyncio.AbstractEventLoop) -> None:
func: Callable
args: tuple
future: asyncio.Future
with claim_worker_thread('asyncio'):
loop = threadlocals.loop = loop
while True:
func, args, future = work_queue.get()
if func is None:
# Shutdown command received
return
if not future.cancelled():
try:
result = func(*args)
except BaseException as exc:
if not loop.is_closed() and not future.cancelled():
loop.call_soon_threadsafe(future.set_exception, exc)
else:
if not loop.is_closed() and not future.cancelled():
loop.call_soon_threadsafe(future.set_result, result)
work_queue.task_done()
_threadpool_work_queue: RunVar[Queue] = RunVar('_threadpool_work_queue')
_threadpool_idle_workers: RunVar[Deque[Tuple[Thread, float]]] = RunVar(
'_threadpool_idle_workers')
_threadpool_workers: RunVar[Set[Thread]] = RunVar('_threadpool_workers')
def _loop_shutdown_callback(f: asyncio.Future) -> None:
"""This is called when the root task has finished."""
for _ in range(len(_threadpool_workers.get())):
_threadpool_work_queue.get().put_nowait((None, None, None))
async def run_sync_in_worker_thread(
func: Callable[..., T_Retval], *args, cancellable: bool = False,
limiter: Optional['CapacityLimiter'] = None) -> T_Retval:
await checkpoint()
# If this is the first run in this event loop thread, set up the necessary variables
try:
work_queue = _threadpool_work_queue.get()
idle_workers = _threadpool_idle_workers.get()
workers = _threadpool_workers.get()
except LookupError:
work_queue = Queue()
idle_workers = deque()
workers = set()
_threadpool_work_queue.set(work_queue)
_threadpool_idle_workers.set(idle_workers)
_threadpool_workers.set(workers)
find_root_task().add_done_callback(_loop_shutdown_callback)
async with (limiter or current_default_thread_limiter()):
with CancelScope(shield=not cancellable):
future: asyncio.Future = asyncio.Future()
work_queue.put_nowait((func, args, future))
if not idle_workers:
thread = Thread(target=_thread_pool_worker, args=(work_queue, get_running_loop()),
name='AnyIO worker thread')
workers.add(thread)
thread.start()
else:
thread, idle_since = idle_workers.pop()
# Prune any other workers that have been idle for WORKER_THREAD_MAX_IDLE_TIME seconds or
# longer
now = current_time()
while idle_workers:
if now - idle_workers[0][1] < WORKER_THREAD_MAX_IDLE_TIME:
break
expired_worker, _ = idle_workers.popleft()
work_queue.put_nowait((None, None, None))
workers.remove(expired_worker)
try:
return await future
finally:
idle_workers.append((thread, current_time()))
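# Usage sketch (illustrative; applications usually reach this through anyio's
# to_thread.run_sync wrapper):
#
#     import time
#
#     async def blocking_work():
#         # time.sleep() runs on a pooled worker thread, so the event loop stays free
#         await run_sync_in_worker_thread(time.sleep, 1)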
def run_sync_from_thread(func: Callable[..., T_Retval], *args,
loop: Optional[asyncio.AbstractEventLoop] = None) -> T_Retval:
@wraps(func)
def wrapper():
try:
f.set_result(func(*args))
except BaseException as exc:
f.set_exception(exc)
if not isinstance(exc, Exception):
raise
f: concurrent.futures.Future[T_Retval] = Future()
loop = loop or threadlocals.loop
loop.call_soon_threadsafe(wrapper)
return f.result()
def run_async_from_thread(func: Callable[..., Coroutine[Any, Any, T_Retval]], *args) -> T_Retval:
f: concurrent.futures.Future[T_Retval] = asyncio.run_coroutine_threadsafe(
func(*args), threadlocals.loop)
return f.result()
class BlockingPortal(abc.BlockingPortal):
def __new__(cls):
return object.__new__(cls)
def __init__(self):
super().__init__()
self._loop = get_running_loop()
def _spawn_task_from_thread(self, func: Callable, args: tuple, kwargs: Dict[str, Any],
name, future: Future) -> None:
run_sync_from_thread(
partial(self._task_group.start_soon, name=name), self._call_func, func, args, kwargs,
future, loop=self._loop)
#
# Subprocesses
#
@dataclass(eq=False)
class StreamReaderWrapper(abc.ByteReceiveStream):
_stream: asyncio.StreamReader
async def receive(self, max_bytes: int = 65536) -> bytes:
data = await self._stream.read(max_bytes)
if data:
return data
else:
raise EndOfStream
async def aclose(self) -> None:
self._stream.feed_eof()
@dataclass(eq=False)
class StreamWriterWrapper(abc.ByteSendStream):
_stream: asyncio.StreamWriter
async def send(self, item: bytes) -> None:
self._stream.write(item)
await self._stream.drain()
async def aclose(self) -> None:
self._stream.close()
@dataclass(eq=False)
class Process(abc.Process):
_process: asyncio.subprocess.Process
_stdin: Optional[StreamWriterWrapper]
_stdout: Optional[StreamReaderWrapper]
_stderr: Optional[StreamReaderWrapper]
async def aclose(self) -> None:
if self._stdin:
await self._stdin.aclose()
if self._stdout:
await self._stdout.aclose()
if self._stderr:
await self._stderr.aclose()
await self.wait()
async def wait(self) -> int:
return await self._process.wait()
def terminate(self) -> None:
self._process.terminate()
def kill(self) -> None:
self._process.kill()
def send_signal(self, signal: int) -> None:
self._process.send_signal(signal)
@property
def pid(self) -> int:
return self._process.pid
@property
def returncode(self) -> Optional[int]:
return self._process.returncode
@property
def stdin(self) -> Optional[abc.ByteSendStream]:
return self._stdin
@property
def stdout(self) -> Optional[abc.ByteReceiveStream]:
return self._stdout
@property
def stderr(self) -> Optional[abc.ByteReceiveStream]:
return self._stderr
async def open_process(command, *, shell: bool, stdin: int, stdout: int, stderr: int):
await checkpoint()
if shell:
process = await asyncio.create_subprocess_shell(command, stdin=stdin, stdout=stdout,
stderr=stderr)
else:
process = await asyncio.create_subprocess_exec(*command, stdin=stdin, stdout=stdout,
stderr=stderr)
stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None
stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None
stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None
return Process(process, stdin_stream, stdout_stream, stderr_stream)
def _forcibly_shutdown_process_pool_on_exit(workers: Set[Process], _task) -> None:
"""
Forcibly shuts down worker processes belonging to this event loop."""
child_watcher: Optional[asyncio.AbstractChildWatcher]
try:
child_watcher = asyncio.get_event_loop_policy().get_child_watcher()
except NotImplementedError:
child_watcher = None
# Close as much as possible (w/o async/await) to avoid warnings
for process in workers:
if process.returncode is None:
continue
process._stdin._stream._transport.close() # type: ignore
process._stdout._stream._transport.close() # type: ignore
process._stderr._stream._transport.close() # type: ignore
process.kill()
if child_watcher:
child_watcher.remove_child_handler(process.pid)
async def _shutdown_process_pool_on_exit(workers: Set[Process]) -> None:
"""
Shuts down worker processes belonging to this event loop.
NOTE: this only works when the event loop was started using asyncio.run() or anyio.run().
"""
process: Process
try:
await sleep(math.inf)
except asyncio.CancelledError:
for process in workers:
if process.returncode is None:
process.kill()
for process in workers:
await process.aclose()
def setup_process_pool_exit_at_shutdown(workers: Set[Process]) -> None:
kwargs = {'name': 'AnyIO process pool shutdown task'} if _native_task_names else {}
create_task(_shutdown_process_pool_on_exit(workers), **kwargs)
find_root_task().add_done_callback(partial(_forcibly_shutdown_process_pool_on_exit, workers))
#
# Sockets and networking
#
class StreamProtocol(asyncio.Protocol):
read_queue: Deque[bytes]
read_event: asyncio.Event
write_future: asyncio.Future
exception: Optional[Exception] = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self.read_queue = deque()
self.read_event = asyncio.Event()
self.write_future = asyncio.Future()
self.write_future.set_result(None)
cast(asyncio.Transport, transport).set_write_buffer_limits(0)
def connection_lost(self, exc: Optional[Exception]) -> None:
if exc:
self.exception = BrokenResourceError()
self.exception.__cause__ = exc
self.read_event.set()
self.write_future = asyncio.Future()
if self.exception:
self.write_future.set_exception(self.exception)
else:
self.write_future.set_result(None)
def data_received(self, data: bytes) -> None:
self.read_queue.append(data)
self.read_event.set()
def eof_received(self) -> Optional[bool]:
self.read_event.set()
return True
def pause_writing(self) -> None:
self.write_future = asyncio.Future()
def resume_writing(self) -> None:
self.write_future.set_result(None)
class DatagramProtocol(asyncio.DatagramProtocol):
read_queue: Deque[Tuple[bytes, IPSockAddrType]]
read_event: asyncio.Event
write_event: asyncio.Event
exception: Optional[Exception] = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self.read_queue = deque(maxlen=100) # arbitrary value
self.read_event = asyncio.Event()
self.write_event = asyncio.Event()
self.write_event.set()
def connection_lost(self, exc: Optional[Exception]) -> None:
self.read_event.set()
self.write_event.set()
def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
addr = convert_ipv6_sockaddr(addr)
self.read_queue.append((data, addr))
self.read_event.set()
def error_received(self, exc: Exception) -> None:
self.exception = exc
def pause_writing(self) -> None:
self.write_event.clear()
def resume_writing(self) -> None:
self.write_event.set()
class SocketStream(abc.SocketStream):
def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard('reading from')
self._send_guard = ResourceGuard('writing to')
self._closed = False
@property
def _raw_socket(self) -> socket.socket:
return self._transport.get_extra_info('socket')
async def receive(self, max_bytes: int = 65536) -> bytes:
with self._receive_guard:
await checkpoint()
if not self._protocol.read_event.is_set() and not self._transport.is_closing():
self._transport.resume_reading()
await self._protocol.read_event.wait()
self._transport.pause_reading()
try:
chunk = self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
elif self._protocol.exception:
raise self._protocol.exception
else:
raise EndOfStream
if len(chunk) > max_bytes:
# Split the oversized chunk
chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
self._protocol.read_queue.appendleft(leftover)
# If the read queue is empty, clear the flag so that the next call will block until
# data is available
if not self._protocol.read_queue:
self._protocol.read_event.clear()
return chunk
async def send(self, item: bytes) -> None:
with self._send_guard:
await checkpoint()
try:
self._transport.write(item)
except RuntimeError as exc:
if self._protocol.write_future.exception():
await self._protocol.write_future
elif self._closed:
raise ClosedResourceError from None
elif self._transport.is_closing():
raise BrokenResourceError from exc
else:
raise
await self._protocol.write_future
async def send_eof(self) -> None:
try:
self._transport.write_eof()
except OSError:
pass
async def aclose(self) -> None:
if not self._transport.is_closing():
self._closed = True
try:
self._transport.write_eof()
except OSError:
pass
self._transport.close()
await sleep(0)
self._transport.abort()
class UNIXSocketStream(abc.SocketStream):
_receive_future: Optional[asyncio.Future] = None
_send_future: Optional[asyncio.Future] = None
_closing = False
def __init__(self, raw_socket: socket.SocketType):
self.__raw_socket = raw_socket
self._loop = get_running_loop()
self._receive_guard = ResourceGuard('reading from')
self._send_guard = ResourceGuard('writing to')
@property
def _raw_socket(self) -> SocketType:
return self.__raw_socket
def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
def callback(f):
del self._receive_future
loop.remove_reader(self.__raw_socket)
f = self._receive_future = asyncio.Future()
self._loop.add_reader(self.__raw_socket, f.set_result, None)
f.add_done_callback(callback)
return f
def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
def callback(f):
del self._send_future
loop.remove_writer(self.__raw_socket)
f = self._send_future = asyncio.Future()
self._loop.add_writer(self.__raw_socket, f.set_result, None)
f.add_done_callback(callback)
return f
async def send_eof(self) -> None:
with self._send_guard:
self._raw_socket.shutdown(socket.SHUT_WR)
async def receive(self, max_bytes: int = 65536) -> bytes:
loop = get_running_loop()
await checkpoint()
with self._receive_guard:
while True:
try:
data = self.__raw_socket.recv(max_bytes)
except BlockingIOError:
await self._wait_until_readable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
if not data:
raise EndOfStream
return data
async def send(self, item: bytes) -> None:
loop = get_running_loop()
await checkpoint()
with self._send_guard:
view = memoryview(item)
while view:
try:
bytes_sent = self.__raw_socket.send(item)
except BlockingIOError:
await self._wait_until_writable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
view = view[bytes_sent:]
async def receive_fds(self, msglen: int, maxfds: int) -> Tuple[bytes, List[int]]:
if not isinstance(msglen, int) or msglen < 0:
raise ValueError('msglen must be a non-negative integer')
if not isinstance(maxfds, int) or maxfds < 1:
raise ValueError('maxfds must be a positive integer')
loop = get_running_loop()
fds = array.array("i")
await checkpoint()
with self._receive_guard:
while True:
try:
message, ancdata, flags, addr = self.__raw_socket.recvmsg(
msglen, socket.CMSG_LEN(maxfds * fds.itemsize))
except BlockingIOError:
await self._wait_until_readable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
if not message and not ancdata:
raise EndOfStream
break
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
raise RuntimeError(f'Received unexpected ancillary data; message = {message}, '
f'cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}')
fds.frombytes(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return message, list(fds)
async def send_fds(self, message: bytes, fds: Collection[Union[int, IOBase]]) -> None:
if not message:
raise ValueError('message must not be empty')
if not fds:
raise ValueError('fds must not be empty')
loop = get_running_loop()
filenos: List[int] = []
for fd in fds:
if isinstance(fd, int):
filenos.append(fd)
elif isinstance(fd, IOBase):
filenos.append(fd.fileno())
fdarray = array.array("i", filenos)
await checkpoint()
with self._send_guard:
while True:
try:
self.__raw_socket.sendmsg([message],
[(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)])
break
except BlockingIOError:
await self._wait_until_writable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
async def aclose(self) -> None:
if not self._closing:
self._closing = True
if self.__raw_socket.fileno() != -1:
self.__raw_socket.close()
if self._receive_future:
self._receive_future.set_result(None)
if self._send_future:
self._send_future.set_result(None)
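# send_fds()/receive_fds() above exchange open file descriptors over the UNIX socket
# using SCM_RIGHTS ancillary data; a sketch of the receiving side (illustrative):
#
#     msg, fds = await stream.receive_fds(msglen=1024, maxfds=4)
#     for fd in fds:
#         os.close(fd)   # the caller owns the duplicated descriptors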
class TCPSocketListener(abc.SocketListener):
_accept_scope: Optional[CancelScope] = None
_closed = False
def __init__(self, raw_socket: socket.SocketType):
self.__raw_socket = raw_socket
self._loop = cast(asyncio.BaseEventLoop, get_running_loop())
self._accept_guard = ResourceGuard('accepting connections from')
@property
def _raw_socket(self) -> socket.socket:
return self.__raw_socket
async def accept(self) -> abc.SocketStream:
if self._closed:
raise ClosedResourceError
with self._accept_guard:
await checkpoint()
with CancelScope() as self._accept_scope:
try:
client_sock, _addr = await self._loop.sock_accept(self._raw_socket)
except asyncio.CancelledError:
# Workaround for https://bugs.python.org/issue41317
try:
self._loop.remove_reader(self._raw_socket)
except (ValueError, NotImplementedError):
pass
if self._closed:
raise ClosedResourceError from None
raise
finally:
self._accept_scope = None
client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
transport, protocol = await self._loop.connect_accepted_socket(StreamProtocol, client_sock)
return SocketStream(cast(asyncio.Transport, transport), cast(StreamProtocol, protocol))
async def aclose(self) -> None:
if self._closed:
return
self._closed = True
if self._accept_scope:
# Workaround for https://bugs.python.org/issue41317
try:
self._loop.remove_reader(self._raw_socket)
except (ValueError, NotImplementedError):
pass
self._accept_scope.cancel()
await sleep(0)
self._raw_socket.close()
class UNIXSocketListener(abc.SocketListener):
def __init__(self, raw_socket: socket.SocketType):
self.__raw_socket = raw_socket
self._loop = get_running_loop()
self._accept_guard = ResourceGuard('accepting connections from')
self._closed = False
async def accept(self) -> abc.SocketStream:
await checkpoint()
with self._accept_guard:
while True:
try:
client_sock, _ = self.__raw_socket.accept()
return UNIXSocketStream(client_sock)
except BlockingIOError:
f: asyncio.Future = asyncio.Future()
self._loop.add_reader(self.__raw_socket, f.set_result, None)
f.add_done_callback(lambda _: self._loop.remove_reader(self.__raw_socket))
await f
except OSError as exc:
if self._closed:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
async def aclose(self) -> None:
self._closed = True
self.__raw_socket.close()
@property
def _raw_socket(self) -> SocketType:
return self.__raw_socket
class UDPSocket(abc.UDPSocket):
def __init__(self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard('reading from')
self._send_guard = ResourceGuard('writing to')
self._closed = False
@property
def _raw_socket(self) -> SocketType:
return self._transport.get_extra_info('socket')
async def aclose(self) -> None:
if not self._transport.is_closing():
self._closed = True
self._transport.close()
async def receive(self) -> Tuple[bytes, IPSockAddrType]:
with self._receive_guard:
await checkpoint()
# If the buffer is empty, ask for more data
if not self._protocol.read_queue and not self._transport.is_closing():
self._protocol.read_event.clear()
await self._protocol.read_event.wait()
try:
return self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
else:
raise BrokenResourceError from None
async def send(self, item: UDPPacketType) -> None:
with self._send_guard:
await checkpoint()
await self._protocol.write_event.wait()
if self._closed:
raise ClosedResourceError
elif self._transport.is_closing():
raise BrokenResourceError
else:
self._transport.sendto(*item)
class ConnectedUDPSocket(abc.ConnectedUDPSocket):
def __init__(self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard('reading from')
self._send_guard = ResourceGuard('writing to')
self._closed = False
@property
def _raw_socket(self) -> SocketType:
return self._transport.get_extra_info('socket')
async def aclose(self) -> None:
if not self._transport.is_closing():
self._closed = True
self._transport.close()
async def receive(self) -> bytes:
with self._receive_guard:
await checkpoint()
# If the buffer is empty, ask for more data
if not self._protocol.read_queue and not self._transport.is_closing():
self._protocol.read_event.clear()
await self._protocol.read_event.wait()
try:
packet = self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
else:
raise BrokenResourceError from None
return packet[0]
async def send(self, item: bytes) -> None:
with self._send_guard:
await checkpoint()
await self._protocol.write_event.wait()
if self._closed:
raise ClosedResourceError
elif self._transport.is_closing():
raise BrokenResourceError
else:
self._transport.sendto(item)
async def connect_tcp(host: str, port: int,
local_addr: Optional[Tuple[str, int]] = None) -> SocketStream:
transport, protocol = cast(
Tuple[asyncio.Transport, StreamProtocol],
await get_running_loop().create_connection(StreamProtocol, host, port,
local_addr=local_addr)
)
transport.pause_reading()
return SocketStream(transport, protocol)
async def connect_unix(path: str) -> UNIXSocketStream:
await checkpoint()
loop = get_running_loop()
raw_socket = socket.socket(socket.AF_UNIX)
raw_socket.setblocking(False)
while True:
try:
raw_socket.connect(path)
except BlockingIOError:
f: asyncio.Future = asyncio.Future()
loop.add_writer(raw_socket, f.set_result, None)
f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
await f
else:
return UNIXSocketStream(raw_socket)
async def create_udp_socket(
family: socket.AddressFamily,
local_address: Optional[IPSockAddrType],
remote_address: Optional[IPSockAddrType],
reuse_port: bool
) -> Union[UDPSocket, ConnectedUDPSocket]:
result = await get_running_loop().create_datagram_endpoint(
DatagramProtocol, local_addr=local_address, remote_addr=remote_address, family=family,
reuse_port=reuse_port)
transport = cast(asyncio.DatagramTransport, result[0])
protocol = cast(DatagramProtocol, result[1])
if protocol.exception:
transport.close()
raise protocol.exception
if not remote_address:
return UDPSocket(transport, protocol)
else:
return ConnectedUDPSocket(transport, protocol)
async def getaddrinfo(host: Union[bytearray, bytes, str], port: Union[str, int, None], *,
family: Union[int, AddressFamily] = 0, type: Union[int, SocketKind] = 0,
proto: int = 0, flags: int = 0) -> GetAddrInfoReturnType:
# https://github.com/python/typeshed/pull/4304
result = await get_running_loop().getaddrinfo(
host, port, family=family, type=type, proto=proto, flags=flags) # type: ignore[arg-type]
return cast(GetAddrInfoReturnType, result)
async def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Tuple[str, str]:
# https://github.com/python/typeshed/pull/4305
result = await get_running_loop().getnameinfo(sockaddr, flags)
return cast(Tuple[str, str], result)
_read_events: RunVar[Dict[Any, asyncio.Event]] = RunVar('read_events')
_write_events: RunVar[Dict[Any, asyncio.Event]] = RunVar('write_events')
async def wait_socket_readable(sock: socket.SocketType) -> None:
await checkpoint()
try:
read_events = _read_events.get()
except LookupError:
read_events = {}
_read_events.set(read_events)
if read_events.get(sock):
raise BusyResourceError('reading from') from None
loop = get_running_loop()
event = read_events[sock] = asyncio.Event()
loop.add_reader(sock, event.set)
try:
await event.wait()
finally:
if read_events.pop(sock, None) is not None:
loop.remove_reader(sock)
readable = True
else:
readable = False
if not readable:
raise ClosedResourceError
async def wait_socket_writable(sock: socket.SocketType) -> None:
await checkpoint()
try:
write_events = _write_events.get()
except LookupError:
write_events = {}
_write_events.set(write_events)
if write_events.get(sock):
raise BusyResourceError('writing to') from None
loop = get_running_loop()
event = write_events[sock] = asyncio.Event()
loop.add_writer(sock.fileno(), event.set)
try:
await event.wait()
finally:
if write_events.pop(sock, None) is not None:
loop.remove_writer(sock)
writable = True
else:
writable = False
if not writable:
raise ClosedResourceError
#
# Synchronization
#
class Event(BaseEvent):
def __new__(cls):
return object.__new__(cls)
def __init__(self):
self._event = asyncio.Event()
def set(self) -> DeprecatedAwaitable:
self._event.set()
return DeprecatedAwaitable(self.set)
def is_set(self) -> bool:
return self._event.is_set()
async def wait(self):
if await self._event.wait():
await checkpoint()
def statistics(self) -> EventStatistics:
return EventStatistics(len(self._event._waiters))
class CapacityLimiter(BaseCapacityLimiter):
_total_tokens: float = 0
def __new__(cls, total_tokens: float):
return object.__new__(cls)
def __init__(self, total_tokens: float):
self._borrowers: Set[Any] = set()
self._wait_queue: Dict[Any, asyncio.Event] = OrderedDict()
self.total_tokens = total_tokens
async def __aenter__(self):
await self.acquire()
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
self.release()
@property
def total_tokens(self) -> float:
return self._total_tokens
@total_tokens.setter
def total_tokens(self, value: float) -> None:
if not isinstance(value, int) and not math.isinf(value):
raise TypeError('total_tokens must be an int or math.inf')
if value < 1:
raise ValueError('total_tokens must be >= 1')
old_value = self._total_tokens
self._total_tokens = value
events = []
for event in self._wait_queue.values():
if value <= old_value:
break
if not event.is_set():
events.append(event)
old_value += 1
for event in events:
event.set()
@property
def borrowed_tokens(self) -> int:
return len(self._borrowers)
@property
def available_tokens(self) -> float:
return self._total_tokens - len(self._borrowers)
def acquire_nowait(self) -> DeprecatedAwaitable:
self.acquire_on_behalf_of_nowait(current_task())
return DeprecatedAwaitable(self.acquire_nowait)
def acquire_on_behalf_of_nowait(self, borrower) -> DeprecatedAwaitable:
if borrower in self._borrowers:
raise RuntimeError("this borrower is already holding one of this CapacityLimiter's "
"tokens")
if self._wait_queue or len(self._borrowers) >= self._total_tokens:
raise WouldBlock
self._borrowers.add(borrower)
return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait)
async def acquire(self) -> None:
return await self.acquire_on_behalf_of(current_task())
async def acquire_on_behalf_of(self, borrower) -> None:
await checkpoint_if_cancelled()
try:
self.acquire_on_behalf_of_nowait(borrower)
except WouldBlock:
event = asyncio.Event()
self._wait_queue[borrower] = event
try:
await event.wait()
except BaseException:
self._wait_queue.pop(borrower, None)
raise
self._borrowers.add(borrower)
else:
await cancel_shielded_checkpoint()
def release(self) -> None:
self.release_on_behalf_of(current_task())
def release_on_behalf_of(self, borrower) -> None:
try:
self._borrowers.remove(borrower)
except KeyError:
raise RuntimeError("this borrower isn't holding any of this CapacityLimiter's "
"tokens") from None
# Notify the next task in line if this limiter has free capacity now
if self._wait_queue and len(self._borrowers) < self._total_tokens:
event = self._wait_queue.popitem()[1]
event.set()
def statistics(self) -> CapacityLimiterStatistics:
return CapacityLimiterStatistics(self.borrowed_tokens, self.total_tokens,
tuple(self._borrowers), len(self._wait_queue))
_default_thread_limiter: RunVar[CapacityLimiter] = RunVar('_default_thread_limiter')
def current_default_thread_limiter():
try:
return _default_thread_limiter.get()
except LookupError:
limiter = CapacityLimiter(40)
_default_thread_limiter.set(limiter)
return limiter
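# Minimal usage sketch for the CapacityLimiter above (illustration only: the
# coroutine is merely defined, never scheduled, and the awaited work is a
# placeholder).
async def _capacity_limiter_example(limiter: CapacityLimiter) -> None:
    # Borrow one token for the duration of the block; __aexit__ releases it.
    async with limiter:
        await sleep(0)  # stand-in for the actual rate-limited work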
#
# Operating system signals
#
class _SignalReceiver(DeprecatedAsyncContextManager):
def __init__(self, signals: Tuple[int, ...]):
self._signals = signals
self._loop = get_running_loop()
self._signal_queue: Deque[int] = deque()
self._future: asyncio.Future = asyncio.Future()
self._handled_signals: Set[int] = set()
def _deliver(self, signum: int) -> None:
self._signal_queue.append(signum)
if not self._future.done():
self._future.set_result(None)
def __enter__(self):
for sig in set(self._signals):
self._loop.add_signal_handler(sig, self._deliver, sig)
self._handled_signals.add(sig)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for sig in self._handled_signals:
self._loop.remove_signal_handler(sig)
def __aiter__(self):
return self
async def __anext__(self) -> int:
await checkpoint()
if not self._signal_queue:
self._future = asyncio.Future()
await self._future
return self._signal_queue.popleft()
def open_signal_receiver(*signals: int) -> _SignalReceiver:
return _SignalReceiver(signals)
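# Hedged usage sketch for open_signal_receiver() above (POSIX only, since it
# relies on loop.add_signal_handler; defined here purely for illustration and
# never called).
async def _signal_receiver_example() -> None:
    import signal
    with open_signal_receiver(signal.SIGTERM, signal.SIGINT) as receiver:
        async for signum in receiver:
            # handle the delivered signal number, then stop listening
            break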
#
# Testing and debugging
#
def _create_task_info(task: asyncio.Task) -> TaskInfo:
task_state = _task_states.get(task)
if task_state is None:
name = task.get_name() if _native_task_names else None # type: ignore
parent_id = None
else:
name = task_state.name
parent_id = task_state.parent_id
return TaskInfo(id(task), parent_id, name, get_coro(task))
def get_current_task() -> TaskInfo:
return _create_task_info(current_task()) # type: ignore
def get_running_tasks() -> List[TaskInfo]:
return [_create_task_info(task) for task in all_tasks() if not task.done()]
async def wait_all_tasks_blocked() -> None:
this_task = current_task()
while True:
for task in all_tasks():
if task is this_task:
continue
if task._fut_waiter is None: # type: ignore[attr-defined]
await sleep(0.1)
break
else:
return
class TestRunner(abc.TestRunner):
def __init__(self, debug: bool = False, use_uvloop: bool = True,
policy: Optional[asyncio.AbstractEventLoopPolicy] = None):
_maybe_set_event_loop_policy(policy, use_uvloop)
self._loop = asyncio.new_event_loop()
self._loop.set_debug(debug)
asyncio.set_event_loop(self._loop)
def _cancel_all_tasks(self):
to_cancel = all_tasks(self._loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel()
self._loop.run_until_complete(
asyncio.gather(*to_cancel, loop=self._loop, return_exceptions=True))
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
raise task.exception()
def close(self) -> None:
try:
self._cancel_all_tasks()
self._loop.run_until_complete(self._loop.shutdown_asyncgens())
finally:
asyncio.set_event_loop(None)
self._loop.close()
def call(self, func: Callable[..., Awaitable], *args, **kwargs):
def exception_handler(loop: asyncio.AbstractEventLoop, context: Dict[str, Any]) -> None:
exceptions.append(context['exception'])
exceptions: List[Exception] = []
self._loop.set_exception_handler(exception_handler)
try:
retval = self._loop.run_until_complete(func(*args, **kwargs))
except Exception as exc:
retval = None
exceptions.append(exc)
finally:
self._loop.set_exception_handler(None)
if len(exceptions) == 1:
raise exceptions[0]
elif exceptions:
raise ExceptionGroup(exceptions)
return retval
|
dmc.py | import os
import threading
import time
import timeit
import pprint
from collections import deque
import torch
from torch import multiprocessing as mp
from torch import nn
from .file_writer import FileWriter
from .models import Model
from .utils import get_batch, log, create_env, create_buffers, create_optimizers, act
mean_episode_return_buf = {p:deque(maxlen=100) for p in ['landlord', 'landlord_up', 'landlord_down']}
def compute_loss(logits, targets):
loss = ((logits.squeeze(-1) - targets)**2).mean()
return loss
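# Illustrative check of compute_loss above (hypothetical values, not executed):
#   logits  = torch.tensor([[0.5], [1.0]])  # (batch, 1) value predictions
#   targets = torch.tensor([1.0, 1.0])      # discounted returns
#   compute_loss(logits, targets)           # -> ((-0.5)**2 + 0.0**2) / 2 = 0.125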
def learn(position,
actor_models,
model,
batch,
optimizer,
flags,
lock):
"""Performs a learning (optimization) step."""
if flags.training_device != "cpu":
device = torch.device('cuda:'+str(flags.training_device))
else:
device = torch.device('cpu')
obs_x_no_action = batch['obs_x_no_action'].to(device)
obs_action = batch['obs_action'].to(device)
obs_x = torch.cat((obs_x_no_action, obs_action), dim=2).float()
obs_x = torch.flatten(obs_x, 0, 1)
obs_z = torch.flatten(batch['obs_z'].to(device), 0, 1).float()
target = torch.flatten(batch['target'].to(device), 0, 1)
episode_returns = batch['episode_return'][batch['done']]
mean_episode_return_buf[position].append(torch.mean(episode_returns).to(device))
with lock:
learner_outputs = model(obs_z, obs_x, return_value=True)
loss = compute_loss(learner_outputs['values'], target)
stats = {
'mean_episode_return_'+position: torch.mean(torch.stack([_r for _r in mean_episode_return_buf[position]])).item(),
'loss_'+position: loss.item(),
}
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), flags.max_grad_norm)
optimizer.step()
for actor_model in actor_models:
actor_model.get_model(position).load_state_dict(model.state_dict())
return stats
def train(flags):
"""
This is the main function for training. It first
initializes everything, such as buffers, optimizers, etc.
Then it starts subprocesses as actors, and finally calls the
learning function with multiple threads.
"""
plogger = FileWriter(
xpid=flags.xpid,
xp_args=flags.__dict__,
rootdir=flags.savedir,
)
checkpointpath = os.path.expandvars(
os.path.expanduser('%s/%s/%s' % (flags.savedir, flags.xpid, 'model.tar')))
T = flags.unroll_length
B = flags.batch_size
# Initialize actor models
models = []
if not flags.actor_device_cpu:
assert flags.num_actor_devices <= len(flags.gpu_devices.split(',')), 'The number of actor devices can not exceed the number of available devices'
for device in range(flags.num_actor_devices):
model = Model(device=device)
model.share_memory()
model.eval()
models.append(model)
else:
model = Model(device="cpu")
model.share_memory()
model.eval()
models.append(model)
# Initialize buffers
buffers = create_buffers(flags)
# Initialize queues
actor_processes = []
ctx = mp.get_context('spawn')
free_queue = []
full_queue = []
for device in range(flags.num_actor_devices):
_free_queue = {'landlord': ctx.SimpleQueue(), 'landlord_up': ctx.SimpleQueue(), 'landlord_down': ctx.SimpleQueue()}
_full_queue = {'landlord': ctx.SimpleQueue(), 'landlord_up': ctx.SimpleQueue(), 'landlord_down': ctx.SimpleQueue()}
free_queue.append(_free_queue)
full_queue.append(_full_queue)
# Learner model for training
learner_model = Model(device=flags.training_device)
# Create optimizers
optimizers = create_optimizers(flags, learner_model)
# Stat Keys
stat_keys = [
'mean_episode_return_landlord',
'loss_landlord',
'mean_episode_return_landlord_up',
'loss_landlord_up',
'mean_episode_return_landlord_down',
'loss_landlord_down',
]
frames, stats = 0, {k: 0 for k in stat_keys}
position_frames = {'landlord':0, 'landlord_up':0, 'landlord_down':0}
# Load models if any
if flags.load_model and os.path.exists(checkpointpath):
checkpoint_states = torch.load(
checkpointpath, map_location=("cuda:"+str(flags.training_device) if flags.training_device != "cpu" else "cpu")
)
checkpoint_states_cpu = None
if flags.actor_device_cpu:
checkpoint_states_cpu = torch.load(
checkpointpath, map_location="cpu"
)
for k in ['landlord', 'landlord_up', 'landlord_down']:
if flags.training_device != "cpu":
learner_model.get_model(k).load_state_dict(checkpoint_states["model_state_dict"][k])
else:
learner_model.get_model(k).load_state_dict(checkpoint_states_cpu["model_state_dict"][k])
optimizers[k].load_state_dict(checkpoint_states["optimizer_state_dict"][k])
if not flags.actor_device_cpu:
for device in range(flags.num_actor_devices):
models[device].get_model(k).load_state_dict(learner_model.get_model(k).state_dict())
else:
for device in range(flags.num_actor_devices):
models[device].get_model(k).load_state_dict(checkpoint_states_cpu["model_state_dict"][k])
stats = checkpoint_states["stats"]
frames = checkpoint_states["frames"]
position_frames = checkpoint_states["position_frames"]
log.info(f"Resuming preempted job, current stats:\n{stats}")
# Starting actor processes
if flags.actor_device_cpu:
flags.num_actor_devices = 1
for device in range(flags.num_actor_devices):
num_actors = flags.num_actors
for i in range(flags.num_actors):
actor = ctx.Process(
target=act,
args=(i, device, free_queue[device], full_queue[device], models[device], buffers[device], flags))
actor.start()
actor_processes.append(actor)
def batch_and_learn(i, device, position, local_lock, position_lock, lock=threading.Lock()):
"""Thread target for the learning process."""
nonlocal frames, position_frames, stats
while frames < flags.total_frames:
batch = get_batch(free_queue[device][position], full_queue[device][position], buffers[device][position], flags, local_lock)
_stats = learn(position, models, learner_model.get_model(position), batch,
optimizers[position], flags, position_lock)
with lock:
for k in _stats:
stats[k] = _stats[k]
to_log = dict(frames=frames)
to_log.update({k: stats[k] for k in stat_keys})
plogger.log(to_log)
frames += T * B
position_frames[position] += T * B
for device in range(flags.num_actor_devices):
for m in range(flags.num_buffers):
free_queue[device]['landlord'].put(m)
free_queue[device]['landlord_up'].put(m)
free_queue[device]['landlord_down'].put(m)
threads = []
locks = [{'landlord': threading.Lock(), 'landlord_up': threading.Lock(), 'landlord_down': threading.Lock()} for _ in range(flags.num_actor_devices)]
position_locks = {'landlord': threading.Lock(), 'landlord_up': threading.Lock(), 'landlord_down': threading.Lock()}
for device in range(flags.num_actor_devices):
for i in range(flags.num_threads):
for position in ['landlord', 'landlord_up', 'landlord_down']:
thread = threading.Thread(
target=batch_and_learn, name='batch-and-learn-%d' % i, args=(i,device,position,locks[device][position],position_locks[position]))
thread.start()
threads.append(thread)
def checkpoint(frames):
if flags.disable_checkpoint:
return
log.info('Saving checkpoint to %s', checkpointpath)
_models = learner_model.get_models()
torch.save({
'model_state_dict': {k: _models[k].state_dict() for k in _models},
'optimizer_state_dict': {k: optimizers[k].state_dict() for k in optimizers},
"stats": stats,
'flags': vars(flags),
'frames': frames,
'position_frames': position_frames
}, checkpointpath)
# Save the weights for evaluation purpose
for position in ['landlord', 'landlord_up', 'landlord_down']:
model_weights_dir = os.path.expandvars(os.path.expanduser(
'%s/%s/%s' % (flags.savedir, flags.xpid, position+'_weights_'+str(frames)+'.ckpt')))
torch.save(learner_model.get_model(position).state_dict(), model_weights_dir)
fps_log = []
timer = timeit.default_timer
try:
last_checkpoint_time = timer() - flags.save_interval * 60
while frames < flags.total_frames:
start_frames = frames
position_start_frames = {k: position_frames[k] for k in position_frames}
start_time = timer()
time.sleep(5)
if timer() - last_checkpoint_time > flags.save_interval * 60:
checkpoint(frames)
last_checkpoint_time = timer()
end_time = timer()
fps = (frames - start_frames) / (end_time - start_time)
fps_log.append(fps)
if len(fps_log) > 24:
fps_log = fps_log[1:]
fps_avg = sum(fps_log) / len(fps_log)
position_fps = {k:(position_frames[k]-position_start_frames[k])/(end_time-start_time) for k in position_frames}
log.info('After %i (L:%i U:%i D:%i) frames: @ %.1f fps (avg@ %.1f fps) (L:%.1f U:%.1f D:%.1f) Stats:\n%s',
frames,
position_frames['landlord'],
position_frames['landlord_up'],
position_frames['landlord_down'],
fps,
fps_avg,
position_fps['landlord'],
position_fps['landlord_up'],
position_fps['landlord_down'],
pprint.pformat(stats))
except KeyboardInterrupt:
return
else:
for thread in threads:
thread.join()
log.info('Learning finished after %d frames.', frames)
checkpoint(frames)
plogger.close()
|
Spreadsheet_Energy_System_Model_Generator.py | # -*- coding: utf-8 -*-
"""
Spreadsheet-Energy-System-Model-Generator.
creates an energy system from a given spreadsheet data file, solves it
for the purpose of least cost optimization, and returns the optimal
scenario results.
The scenario.xlsx-file must contain the following elements:
+-------------+--------------------------------------------------------+
|sheet | columns |
+=============+========================================================+
|energysystem | start_date, end_date, holidays, temporal resolution, |
| | timezone |
+-------------+--------------------------------------------------------+
|buses | label, active, excess, shortage, |
| | shortage costs /(CU/kWh), excess costs /(CU/kWh) |
+-------------+--------------------------------------------------------+
|sinks | label, active, input, input2, load profile, |
| | nominal value /(kW), annual demand /(kWh/a), |
| | occupants [RICHARDSON], building class [HEAT SLP ONLY],|
| | wind class [HEAT SLP ONLY], fixed |
+-------------+--------------------------------------------------------+
|sources | label, active, output, technology, |
| | variable costs /(CU/kWh), existing capacity /(kW), |
| | min. investment capacity /(kW), |
| | max. investment capacity /(kW), |
| | periodical costs /(CU/(kW a)), |
| | technology database (PV ONLY), |
| | inverter database (PV ONLY), Modul Model (PV ONLY), |
| | Inverter Model (PV ONLY), reference value /(kW), |
| | Azimuth (PV ONLY), Surface Tilt (PV ONLY), |
| | Albedo (PV ONLY), Altitude (PV ONLY), |
| | Latitude (PV ONLY), Longitude (PV ONLY) |
+-------------+--------------------------------------------------------+
|transformers | label, active, transformer type, input, output, |
| | output2, efficiency, efficiency2, |
| | variable input costs /(CU/kWh), |
| | variable output costs /(CU/kWh), |
| | existing capacity /(kW), |
| | max. investment capacity /(kW), |
| | min. investment capacity /(kW), |
| | periodical costs /(CU/(kW a)) |
+-------------+--------------------------------------------------------+
|storages | label, active, bus, existing capacity /(kW), |
| | min. investment capacity /(kW), |
| | max. investment capacity /(kW), |
| | periodical costs /(CU/(kW a)), capacity inflow, |
| | capacity outflow, capacity loss, efficiency inflow, |
| | efficiency outflow, initial capacity, capacity min, |
| | capacity max, variable input costs, |
| | variable output costs |
+-------------+--------------------------------------------------------+
|powerlines | label, active, bus_1, bus_2, (un)directed, efficiency, |
| | existing capacity /(kW), |
| | min. investment capacity /(kW), |
| | max. investment capacity /(kW), |
| | variable costs /(CU/kWh), periodical costs /(CU/(kW a))|
+-------------+--------------------------------------------------------+
|time_series | timestamp, |
| | timeseries for components with fixed input or output |
+-------------+--------------------------------------------------------+
|weather_data | dates(untitled), dhi, dirhi, pressure, temp_air, |
| | windspeed, z0 |
+-------------+--------------------------------------------------------+
Docs:
- https://spreadsheet-energy-system-model-generator.readthedocs.io/en/latest/
GIT:
https://github.com/chrklemm/SESMG
-------------------------------------------------------------------------------
Christian Klemm - christian.klemm@fh-muenster.de
"""
import logging
from oemof.tools import logger
import os
import pandas as pd
from threading import *
import sys
from program_files import (create_objects,
create_results,
create_energy_system,
optimize_model,
create_graph,
data_preparation)
def sesmg_main(scenario_file: str, result_path: str, num_threads: int,
graph: bool, criterion_switch: bool, xlsx_results: bool,
console_results: bool, timeseries_prep: list, solver: str):
"""
Main function of the Spreadsheet Energy System Model Generator
:param scenario_file: The scenario_file must contain the
components specified above.
:type scenario_file: str ['xlsx']
:param result_path: path of the folder where the results
will be saved
:type result_path: str ['folder']
:param num_threads: number of threads that the method may use
:type num_threads: int
:param graph: defines if the graph should be created
:type graph: bool
:param xlsx_results: defines if xlsx result files should be created
:type xlsx_results: bool
:param console_results: defines if the results should be printed to the console
:type console_results: bool
Christian Klemm - christian.klemm@fh-muenster.de
"""
# SETS NUMBER OF THREADS FOR NUMPY
os.environ['NUMEXPR_NUM_THREADS'] = str(num_threads)
# DEFINES A LOGGING FILE
logger.define_logging(logpath=result_path)
# IMPORTS DATA FROM THE EXCEL FILE AND RETURNS IT AS DICTIONARY
nodes_data = create_energy_system.import_scenario(filepath=scenario_file)
# CRITERION SWITCH
if criterion_switch:
data_preparation.change_optimization_criterion(nodes_data)
if sys.platform.startswith("win"):
scheme_path = \
os.path.join(os.path.dirname(__file__)
+ r'\technical_data\hierarchical_selection'
r'_schemes.xlsx')
else:
scheme_path = \
os.path.join(os.path.dirname(__file__)
+ r'/technical_data/hierarchical_selection'
r'_schemes.xlsx')
# Timeseries Preprocessing
data_preparation.timeseries_preparation(timeseries_prep_param=timeseries_prep,
nodes_data=nodes_data,
scheme_path=scheme_path,
result_path=result_path)
if timeseries_prep[0] != 'none':
scenario_file = result_path + "/modified_scenario.xlsx"
# CREATES AN ENERGYSYSTEM AS DEFINED IN THE SCENARIO FILE
esys = create_energy_system.define_energy_system(nodes_data=nodes_data)
weather_data = nodes_data['weather data']
time_series = nodes_data['timeseries']
# CREATES A LIST OF COMPONENTS
nodes = []
# CREATES BUS OBJECTS, EXCESS SINKS, AND SHORTAGE SOURCES AS DEFINED IN THE
# SCENARIO FILE AND ADDS THEM TO THE LIST OF COMPONENTS
busd = create_objects.buses(nodes_data=nodes_data,
nodes=nodes)
# PARALLEL CREATION OF ALL OBJECTS OF THE SCENARIO FILE
# CREATES SOURCE OBJECTS AS DEFINED IN THE SCENARIO FILE AND ADDS THEM TO
# THE LIST OF COMPONENTS
t1 = Thread(target=create_objects.Sources,
args=(nodes_data, nodes, busd, time_series, weather_data))
t1.start()
# CREATES SINK OBJECTS AS DEFINED IN THE SCENARIO FILE AND ADDS THEM TO
# THE LIST OF COMPONENTS
t2 = Thread(target=create_objects.Sinks, args=(nodes_data, busd,
nodes, time_series,
weather_data))
t2.start()
# CREATES TRANSFORMER OBJECTS AS DEFINED IN THE SCENARIO FILE AND ADDS THEM
# TO THE LIST OF COMPONENTS
t3 = Thread(target=create_objects.Transformers, args=(nodes_data, nodes,
busd, weather_data))
t3.start()
# CREATES STORAGE OBJECTS AS DEFINED IN THE SCENARIO FILE AND ADDS THEM TO
# THE LIST OF COMPONENTS
t4 = Thread(target=create_objects.Storages, args=(nodes_data, nodes,
busd,))
t4.start()
# CREATES LINK OBJECTS AS DEFINED IN THE SCENARIO FILE AND ADDS THEM TO
# THE LIST OF COMPONENTS
t5 = Thread(target=create_objects.Links, args=(nodes_data, nodes, busd,))
t5.start()
# WAIT UNTIL THE THREADS HAVE DONE THEIR JOBS
t1.join()
t2.join()
t3.join()
t4.join()
t5.join()
# ADDS THE COMPONENTS TO THE ENERGYSYSTEM
esys.add(*nodes)
# PRINTS A GRAPH OF THE ENERGY SYSTEM
create_graph.create_graph(filepath=result_path, nodes_data=nodes_data,
show=graph)
# OPTIMIZES THE ENERGYSYSTEM AND RETURNS THE OPTIMIZED ENERGY SYSTEM
om = optimize_model.least_cost_model(esys, num_threads, nodes_data, busd, solver)
# SHOWS AND SAVES RESULTS OF THE OPTIMIZED MODEL / POST-PROCESSING
if xlsx_results:
create_results.xlsx(nodes_data=nodes_data, optimization_model=om,
filepath=result_path)
# CREATES PLOTLY RESULTS AND LOGS RESULTS OF CBC SOLVER
create_results.Results(nodes_data, om, esys, result_path,
console_log=console_results)
logging.info(' ' + '----------------------------------------------'
'----------')
logging.info(' ' + 'Modelling and optimization successfully completed!')
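# Minimal invocation sketch (assumption: every value below is a placeholder and
# not part of the original module; the timeseries_prep format is defined by
# data_preparation.timeseries_preparation, only the 'none' switch is used here):
#
#   sesmg_main(scenario_file='scenario.xlsx',
#              result_path='results',
#              num_threads=2,
#              graph=False,
#              criterion_switch=False,
#              xlsx_results=True,
#              console_results=True,
#              timeseries_prep=['none'],
#              solver='cbc')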
|
sound_utils.py | #!/usr/bin/env python
import rospy
from Queue import Queue
from threading import Thread
from sound_play.msg import SoundRequest
from sound_play.libsoundplay import SoundClient
class SoundUtils():
INSTANCE = None
@classmethod
def get_instance(cls):
if cls.INSTANCE is None:
cls.INSTANCE = SoundUtils()
return cls.INSTANCE
def __init__(self):
if self.INSTANCE is not None:
raise ValueError("An instantiation already exists!")
# rospy.init_node('say', anonymous = True)
self.voice = ''
self.volume = 1.0
self.soundhandle = SoundClient()
self.last_message = ''
self.last_time_said = rospy.Time(0)
self.time_sleep_in_sec = rospy.Time(2).to_sec()
self.time_delay_in_sec = rospy.Time(30).to_sec()
self.previous_messages = {}
self.message_queue = Queue()
self.thread = Thread(target=self.observe_queue)
self.thread.start()
# self._trigger_cond = threading.Condition()
# rospy.sleep(1)
def observe_queue(self):
while True:
if not self.message_queue.empty():
self.soundhandle.say(self.message_queue.get())
rospy.sleep(self.time_sleep_in_sec)
pass
def say_message(self,message):
# if message == self.last_message:
# return
now = rospy.Time().now()
if message in self.previous_messages:
last_time_said = self.previous_messages[message]
diff_in_sec = (now - last_time_said).to_sec()
if diff_in_sec < self.time_delay_in_sec:
return
rospy.loginfo(message)
self.previous_messages[message] = now
self.message_queue.put(message)
def say_message2(self,message):
# if message == self.last_message:
# return
now = rospy.Time().now()
diff_in_sec = (now - self.last_time_said).to_sec()
if diff_in_sec < self.time_sleep_in_sec:
return
self.last_message = message
self.last_time_said = now
self.soundhandle.say(message, self.voice, self.volume)
# if __name__ == '__main__':
# try:
# SoundUtils()
# rospy.sleep(1)
# except:
# pass
|
lambda_executors.py | import os
import re
import json
import time
import logging
import threading
import subprocess
# from datetime import datetime
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
# for Python 2.7
from pipes import quote as cmd_quote
from localstack import config
from localstack.utils.common import run, TMP_FILES, short_uid, save_file, to_str, cp_r
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
LAMBDA_EVENT_FILE = 'event_file.json'
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME = 600
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the execute method """
def __init__(self):
pass
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def run_lambda_executor(self, cmd, env_vars={}, asynchronous=False):
process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE, env_vars=env_vars)
if asynchronous:
result = '{"asynchronous": "%s"}' % asynchronous
log_output = 'Lambda executed asynchronously'
else:
return_code = process.wait()
result = to_str(process.stdout.read())
log_output = to_str(process.stderr.read())
if return_code != 0:
raise Exception('Lambda process returned error status code: %s. Output:\n%s' %
(return_code, log_output))
return result, log_output
# holds information about an existing container.
class ContainerInfo:
"""
Contains basic information about a docker container.
"""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
raise Exception('Not implemented')
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = func_details.envvars.copy()
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(event)
event_body_escaped = event_body.replace("'", "\\'")
docker_host = config.DOCKER_HOST_FROM_CONTAINER
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body_escaped
environment['HOSTNAME'] = docker_host
environment['LOCALSTACK_HOSTNAME'] = docker_host
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
# custom command to execute in the container
command = ''
# if running a Java Lambda, set up classpath arguments
if runtime == LAMBDA_RUNTIME_JAVA8:
# copy executor jar into temp directory
cp_r(LAMBDA_EXECUTOR_JAR, lambda_cwd)
# TODO cleanup once we have custom Java Docker image
taskdir = '/var/task'
save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
command = ("bash -c 'cd %s; java -cp .:`ls *.jar | tr \"\\n\" \":\"` \"%s\" \"%s\" \"%s\"'" %
(taskdir, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
# lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
LOG.debug('Running lambda cmd: %s' % cmd)
result, log_output = self.run_lambda_executor(cmd, environment, asynchronous)
LOG.debug('Lambda result / log output:\n%s\n>%s' % (result.strip(), log_output.strip().replace('\n', '\n> ')))
return result, log_output
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# set the invocation time
self.function_invoke_times[func_arn] = time.time()
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
# Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
# passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
# available for docker exec, to better support very large Lambda events (very long environment values)
exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# determine files to be copied into the container
copy_command = ''
event_file = os.path.join(lambda_cwd, LAMBDA_EVENT_FILE)
if not has_been_invoked_before:
# if this is the first invocation: copy the entire folder into the container
copy_command = 'docker cp "%s/." "%s:/var/task"; ' % (lambda_cwd, container_info.name)
elif os.path.exists(event_file):
# otherwise, copy only the event file if it exists
copy_command = 'docker cp "%s" "%s:/var/task"; ' % (event_file, container_info.name)
cmd = (
'%s' # copy files command
'docker exec'
' %s' # env variables
' %s' # container name
' %s' # run cmd
) % (copy_command, exec_env_vars, container_info.name, command)
return cmd
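# Illustrative value of `cmd` for a first invocation (container name, path and
# environment variable are hypothetical; the entry point and handler depend on
# the runtime image):
#   docker cp "/tmp/lambda-abc/." "localstack_lambda_my-func:/var/task"; docker exec -e FOO="$FOO" localstack_lambda_my-func <entry_point> <handler>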
def startup(self):
self.cleanup()
# start a process to remove idle containers
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
:param runtime: Lambda runtime environment. python2.7, nodejs6.10, etc.
:param func_arn: The ARN of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
network = config.LAMBDA_DOCKER_NETWORK
network_str = ' --network="%s" ' % network if network else ''
# Create and start the container
LOG.debug('Creating container: %s' % container_name)
cmd = (
'docker create'
' --rm'
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' --interactive' # Keeps the container running bash.
' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' %s' # env_vars
' %s' # network
' lambci/lambda:%s'
) % (container_name, env_vars_str, network_str, runtime)
LOG.debug(cmd)
run(cmd)
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
cmd = (
'docker cp'
' "%s/." "%s:/var/task"'
) % (lambda_cwd, container_name)
LOG.debug(cmd)
run(cmd)
LOG.debug('Starting container: %s' % container_name)
cmd = 'docker start %s' % (container_name)
LOG.debug(cmd)
run(cmd)
# give the container some time to start up
time.sleep(1)
# Get the entry point for the image.
LOG.debug('Getting the entrypoint for image: lambci/lambda:%s' % runtime)
cmd = (
'docker image inspect'
' --format="{{ .ContainerConfig.Entrypoint }}"'
' lambci/lambda:%s'
) % (runtime)
LOG.debug(cmd)
run_result = run(cmd)
entry_point = run_result.strip('[]\n\r ')
container_network = self.get_docker_container_network(func_arn)
LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network))
return ContainerInfo(container_name, entry_point)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = (
'docker stop -t0 %s'
) % (container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
cmd = (
'docker rm %s'
) % (container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A String[] localstack docker container names for each function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda containers names.')
cmd = 'docker ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"'
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = 'docker rm -f %s' % container_name
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 If the container is running,
-1 if the container exists but is not running
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
# Check if the container is already running
# Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
# systems. Therefore, we use a combination of filter and grep to get the results.
cmd = ('docker ps -a --filter name=\'%s\' '
'--format "{{ .Status }} - {{ .Names }}" '
'| grep -w "%s" | cat') % (container_name, container_name)
LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
cmd_result = run(cmd)
# If the container doesn't exist. Create and start it.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ''
# Get the container name.
container_name = self.get_container_name(func_arn)
# Get the container network
LOG.debug('Getting container network: %s' % container_name)
cmd = (
'docker inspect %s'
' --format "{{ .HostConfig.NetworkMode }}"'
) % (container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
container_network = cmd_result.strip()
return container_network
def idle_container_destroyer(self):
"""
Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME.
:return: None
"""
LOG.info('Checking if there are idle containers.')
current_time = time.time()
for func_arn, last_run_time in self.function_invoke_times.items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.
Thus checking for idle containers and destroying them.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
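# Example of the mapping performed above (hypothetical ARN, shown only for
# illustration): 'arn:aws:lambda:us-east-1:000000000000:function:my-func'
# -> 'localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_my-func'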
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
else:
command = '"%s"' % handler
env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
network = config.LAMBDA_DOCKER_NETWORK
network_str = ' --network="%s" ' % network if network else ''
if config.LAMBDA_REMOTE_DOCKER:
cmd = (
'CONTAINER_ID="$(docker create'
' %s'
' %s'
' %s' # network
' "lambci/lambda:%s" %s'
')";'
'docker cp "%s/." "$CONTAINER_ID:/var/task";'
'docker start -a "$CONTAINER_ID";'
) % (entrypoint, env_vars_string, network_str, runtime, command, lambda_cwd)
else:
lambda_cwd_on_host = self.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
'docker run'
'%s -v "%s":/var/task'
' %s'
' %s' # network
' --rm'
' "lambci/lambda:%s" %s'
) % (entrypoint, lambda_cwd_on_host, env_vars_string, network_str, runtime, command)
return cmd
def get_host_path_for_path_in_docker(self, path):
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
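# e.g. with purely hypothetical config values TMP_FOLDER='/tmp/localstack' and
# HOST_TMP_FOLDER='/opt/tmp/localstack':
# '/tmp/localstack/lambda.abc' -> '/opt/tmp/localstack/lambda.abc'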
class LambdaExecutorLocal(LambdaExecutor):
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
lambda_cwd = func_details.cwd
environment = func_details.envvars.copy()
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
if lambda_cwd:
os.chdir(lambda_cwd)
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
queue.put(result)
process = Process(target=do_execute)
process.start()  # actually fork the sub-process, matching the comment above
result = queue.get()
process.join()
# TODO capture log output during local execution?
log_output = ''
return result, log_output
def execute_java_lambda(self, event, context, handler, main_file):
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(event))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
asynchronous = False
# flip asynchronous flag depending on origin
if 'Records' in event:
# TODO: add more event supporting asynchronous lambda execution
if 'Sns' in event['Records'][0]:
asynchronous = True
if 'dynamodb' in event['Records'][0]:
asynchronous = True
result, log_output = self.run_lambda_executor(cmd, asynchronous=asynchronous)
LOG.debug('Lambda result / log output:\n%s\n> %s' % (result.strip(), log_output.strip().replace('\n', '\n> ')))
return result, log_output
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_LOCAL
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
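# A caller would typically select the executor from the config value, e.g.
# (sketch, not part of the original module):
#   lambda_executor = AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, DEFAULT_EXECUTOR)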
|
tests.py | # coding=utf-8
import random
import time
import threading
import unittest
from lru_cache import LruCache
class TestLruCache(unittest.TestCase):
def test_cache_normal(self):
a = []
@LruCache(maxsize=2, timeout=1)
def bar(num):
a.append(num)
return num
bar(1)
bar(1)
self.assertEqual(a, [1])
def test_cache_none(self):
a = []
@LruCache(maxsize=2, timeout=1)
def bar(num):
a.append(num)
return None
bar(1)
bar(1)
self.assertEqual(a, [1])
def test_cache_when_timeout(self):
a = []
@LruCache(maxsize=2, timeout=1)
def bar(num):
a.append(num)
return num
bar(2)
time.sleep(2)
bar(2)
self.assertEqual(a, [2, 2])
def test_cache_when_cache_is_full(self):
a = []
@LruCache(maxsize=2, timeout=1)
def bar(num):
a.append(num)
return num
bar(1)
bar(2)
bar(3)
bar(1)
self.assertEqual(a, [1, 2, 3, 1])
def test_cache_with_multi_thread(self):
a = []
@LruCache(maxsize=10, timeout=1)
def bar(num):
a.append(num)
return num
for i in xrange(10):
threading.Thread(target=bar, args=(i, )).start()
main_thread = threading.currentThread()
for t in threading.enumerate():
if t is not main_thread:
t.join()
bar(random.randint(0, 9))
self.assertEqual(set(a), set(range(10)))
def test_cache_with_multi_thread_two_func(self):
a = []
@LruCache(maxsize=10, timeout=1)
def bar(num):
a.append(num)
return num
b = []
@LruCache(maxsize=10, timeout=1)
def foo(num):
b.append(num)
return num + 1
for i in xrange(10):
threading.Thread(target=bar, args=(i, )).start()
threading.Thread(target=foo, args=(i, )).start()
main_thread = threading.currentThread()
for t in threading.enumerate():
if t is not main_thread:
t.join()
feed = random.randint(0, 9)
self.assertEqual(bar(feed), feed)
self.assertEqual(foo(feed), feed + 1)
self.assertEqual(set(a), set(range(10)))
self.assertEqual(set(b), set(range(10)))
def test_cache_when_timeout_and_maxsize_is_none(self):
a = []
@LruCache()
def bar(num):
a.append(num)
return num
bar(1)
bar(1)
self.assertEqual(a, [1])
def test_cache_when_timeout_is_none(self):
a = []
@LruCache(maxsize=10)
def bar(num):
a.append(num)
return num
bar(1)
bar(1)
self.assertEqual(a, [1])
def test_cache_when_only_maxsize_is_none_normal(self):
a = []
@LruCache(timeout=2)
def bar(num):
a.append(num)
return num
bar(1)
bar(1)
self.assertEqual(a, [1])
def test_cache_when_only_maxsize_is_none_timeout(self):
a = []
@LruCache(timeout=1)
def bar(num):
a.append(num)
return num
bar(1)
time.sleep(2)
bar(1)
self.assertEqual(a, [1, 1])
def test_cache_when_only_maxsize_is_none_normal_method(self):
a = []
class Func(object):
@LruCache(timeout=2)
def bar(self, num):
a.append(num)
return num
fun = Func()
fun.bar(1)
fun.bar(1)
self.assertEqual(a, [1])
def test_cache_when_only_maxsize_is_none_normal_method_timeout(self):
a = []
class Func(object):
@LruCache(timeout=1)
def bar(self, num):
a.append(num)
return num
fun = Func()
fun.bar(1)
time.sleep(2)
fun.bar(1)
self.assertEqual(a, [1, 1])
def test_invalidate(self):
a = []
@LruCache()
def bar(num):
a.append(num)
return num
bar(1)
bar(1)
self.assertEqual(a, [1])
bar.invalidate(1)
bar(1)
self.assertEqual(a, [1, 1])
if __name__ == "__main__":
unittest.main()
|
tomostream3d.py |
'''
Adaptation of Tomostream orthoslice code for doing full 3d reconstructions and applying DL-based image processing or computer vision steps.
Authors:
Viktor Nikitin, ANL
Aniket Tekawade, ANL
'''
import pvaccess as pva
import numpy as np
import queue
import time
import h5py
import threading
import signal
from tomostream import util
from tomostream import log
# import solver_orig as solver
import solver3d as solver
#from tomostream import solver
from epics import PV
from noise2self import *
torch_model_path = '/home/beams9/7BMB/solder_beamtime/solder_imaging/mdl-ep00099.pth'
class EncoderStream():
""" Class for streaming reconstuction of ortho-slices on a machine with GPU.
The class creates and broadcasts a pva type pv for concatenated reconstructions
of (x,y,z) ortho-slices. Reconstructons are done by the FBP formula
with direct discretization of the circular integral.
Projection data is taken from the detector pv (pva type channel)
and stored in a queue, dark and flat fields are taken from the pv broadcasted
by the server on the detector machine (see tomoscan_stream.py from Tomoscan package).
Parameters
----------
args : dict
Dictionary of pv variables.
"""
def __init__(self, pv_files, macros):
log.setup_custom_logger("./encoderstream.log")
# init pvs
self.config_pvs = {}
self.control_pvs = {}
self.pv_prefixes = {}
if not isinstance(pv_files, list):
pv_files = [pv_files]
for pv_file in pv_files:
self.read_pv_file(pv_file, macros)
self.show_pvs()
self.epics_pvs = {**self.config_pvs, **self.control_pvs}
prefix = self.pv_prefixes['TomoScan']
# tomoscan pvs
self.epics_pvs['FrameType'] = PV(prefix + 'FrameType')
self.epics_pvs['NumAngles'] = PV(prefix + 'NumAngles')
self.epics_pvs['RotationStep'] = PV(prefix + 'RotationStep')
# Replace PSOPVPrefix to link to check a TomoScanStream PV so it returns if scan IOC is down
# self.epics_pvs['PSOPVPrefix'] = PV(prefix + 'PSOPVPrefix')
# if self.epics_pvs['PSOPVPrefix'].get(as_string=True) == None:
# log.error("TomoScan is down")
# log.error("Type exit() here and start TomoScan first")
# return
# pva type channel for flat and dark fields pv broadcasted from the detector machine
self.epics_pvs['PvaDark'] = pva.Channel(self.epics_pvs['DarkPVAName'].get())
self.pva_dark = self.epics_pvs['PvaDark']
self.epics_pvs['PvaFlat'] = pva.Channel(self.epics_pvs['FlatPVAName'].get())
self.pva_flat = self.epics_pvs['PvaFlat']
self.epics_pvs['PvaTheta'] = pva.Channel(self.epics_pvs['ThetaPVAName'].get())
self.pva_theta = self.epics_pvs['PvaTheta']
# pva type channel that contains projection and metadata
image_pv_name = PV(self.epics_pvs['ImagePVAPName'].get()).get()
self.epics_pvs['PvaPImage'] = pva.Channel(image_pv_name + 'Image')
self.epics_pvs['PvaPDataType_RBV'] = pva.Channel(image_pv_name + 'DataType_RBV')
self.pva_plugin_image = self.epics_pvs['PvaPImage']
# create pva type pv for reconstruction by copying metadata from the data pv, but replacing the sizes
# This way the ADViewer (NDViewer) plugin can be also used for visualizing reconstructions.
pva_image_data = self.pva_plugin_image.get('')
pva_image_dict = pva_image_data.getStructureDict()
self.pv_rec = pva.PvObject(pva_image_dict)
# run server for reconstruction pv
recon_pva_name = self.epics_pvs['ReconPVAName'].get()
self.server_rec = pva.PvaServer(recon_pva_name, self.pv_rec)
self.epics_pvs['StartRecon'].put('Done')
self.epics_pvs['AbortRecon'].put('Yes')
self.epics_pvs['StartRecon'].add_callback(self.pv_callback)
self.epics_pvs['AbortRecon'].add_callback(self.pv_callback)
self.slv = None
# Set ^C, ^Z interrupt to abort the stream reconstruction
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTSTP, self.signal_handler)
# Start the watchdog timer thread
thread = threading.Thread(target=self.reset_watchdog, args=(), daemon=True)
thread.start()
def pv_callback(self, pvname=None, value=None, char_value=None, **kw):
"""Callback function that is called by pyEpics when certain EPICS PVs are changed
The PVs that are handled are:
- ``StartScan`` : Calls ``run_fly_scan()``
- ``AbortScan`` : Calls ``abort_scan()``
"""
log.debug('pv_callback pvName=%s, value=%s, char_value=%s', pvname, value, char_value)
if (pvname.find('StartRecon') != -1) and (value == 1):
thread = threading.Thread(target=self.begin_stream, args=())
thread.start()
elif (pvname.find('AbortRecon') != -1) and (value == 0):
thread = threading.Thread(target=self.abort_stream, args=())
thread.start()
def signal_handler(self, sig, frame):
"""Calls abort_scan when ^C or ^Z is typed"""
if (sig == signal.SIGINT) or (sig == signal.SIGTSTP):
self.abort_stream()
def reset_watchdog(self):
"""Sets the watchdog timer to 5 every 3 seconds"""
while True:
self.epics_pvs['Watchdog'].put(5)
time.sleep(3)
def reinit_monitors(self):
"""Reinit pv monitoring functions with updating data sizes"""
log.warning('reinit monitors with updating data sizes')
# stop monitors
self.pva_dark.stopMonitor()
self.pva_flat.stopMonitor()
self.pva_plugin_image.stopMonitor()
while(self.pva_dark.isMonitorActive() or
self.pva_flat.isMonitorActive() or
self.pva_plugin_image.isMonitorActive()):
time.sleep(0.01)
time.sleep(0.5)# need to wait for some reason? to check
# take new data sizes
pva_image_data = self.pva_plugin_image.get('')
width = pva_image_data['dimension'][0]['size']
height = pva_image_data['dimension'][1]['size']
self.pv_rec['dimension'] = [{'size': 3*width, 'fullSize': 3*width, 'binning': 1},
{'size': width, 'fullSize': width, 'binning': 1}]
# self.theta = self.epics_pvs['ThetaArray'].get()[:self.epics_pvs['NumAngles'].get()]
self.theta = self.pva_theta.get()['value']
print(self.theta)
#exit()
# update limits on sliders
# epics_pvs['OrthoXlimit'].put(width-1)
# epics_pvs['OrthoYlimit'].put(width-1)
# epics_pvs['OrthoZlimit'].put(height-1)
## create a queue to store projections
# find max size of the queue, the size is equal to the number of angles in the interval of size pi
if(max(self.theta)<180):
buffer_size = len(self.theta)
else:
dtheta = self.theta[1]-self.theta[0]
buffer_size = np.where(self.theta-self.theta[0]>180-dtheta)[0][0]
if(buffer_size*width*height>pow(2,32)):
log.error('buffer_size %s not enough memory', buffer_size)
exit(0)
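# Worked example of the buffer sizing above (hypothetical scan, for
# illustration only): for theta = 0, 0.5, ..., 359.5 deg, dtheta = 0.5 and the
# first angle with theta - theta[0] > 179.5 is 180.0 at index 360, so
# buffer_size = 360, i.e. one buffered projection per half-turn.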
# queue
self.data_queue = queue.Queue(maxsize=buffer_size)
# self.recon_queue = queue.Queue(maxsize=1)
# take datatype
datatype_list = self.epics_pvs['PvaPDataType_RBV'].get()['value']
self.datatype = datatype_list['choices'][datatype_list['index']].lower()
# update parameters from the GUI
center = self.epics_pvs['Center'].get()
idx = self.epics_pvs['OrthoX'].get()
idy = self.epics_pvs['OrthoY'].get()
idz = self.epics_pvs['OrthoZ'].get()
rotx = self.epics_pvs['RotX'].get()
roty = self.epics_pvs['RotY'].get()
rotz = self.epics_pvs['RotZ'].get()
fbpfilter = self.epics_pvs['FilterType'].get(as_string=True)
dezinger = self.epics_pvs['Dezinger'].get(as_string=False)
if hasattr(self,'width'): # update parameters for new sizes
self.epics_pvs['Center'].put(center*width/self.width)
self.epics_pvs['OrthoX'].put(int(idx*width/self.width))
self.epics_pvs['OrthoY'].put(int(idy*width/self.width))
self.epics_pvs['OrthoZ'].put(int(idz*width/self.width))
## create solver class on GPU
self.slv = solver.Solver(buffer_size, width, height,
center, idx, idy, idz, rotx, roty, rotz, fbpfilter, dezinger, self.datatype)
self.slv.dn_model = load_denoise_model(torch_model_path)
# temp buffers for storing data taken from the queue
self.proj_buffer = np.zeros([buffer_size, width*height], dtype=self.datatype)
self.theta_buffer = np.zeros(buffer_size, dtype='float32')
self.ids_buffer = np.zeros(buffer_size, dtype='int32')
self.width = width
self.height = height
self.buffer_size = buffer_size
## start PV monitoring
# start monitoring dark and flat fields pv
self.pva_dark.monitor(self.add_dark,'')
self.pva_flat.monitor(self.add_flat,'')
# start monitoring projection data
self.pva_plugin_image.monitor(self.add_data,'')
self.stream_is_running = True
def add_data(self, pv):
"""PV monitoring function for adding projection data and corresponding angle to the queue"""
frame_type = self.epics_pvs['FrameType'].get(as_string=True)
if(self.stream_is_running and frame_type == 'Projection'):
cur_id = pv['uniqueId'] # unique projection id for determining angles and places in the buffers
# write projection, theta, and id into the queue
data_item = {'projection': pv['value'][0][util.type_dict[self.datatype]],
'theta': self.theta[min(cur_id,len(self.theta)-1)],
'id': np.mod(cur_id, self.buffer_size)
}
if(not self.data_queue.full()):
self.data_queue.put(data_item)
else:
log.warning("queue is full, skip frame")
# pass
# log.info('id: %s type %s queue size %s', cur_id, frame_type, self.data_queue.qsize())
def add_dark(self, pv):
"""PV monitoring function for reading new dark fields from manually running pv server
on the detector machine"""
if(self.stream_is_running and len(pv['value'])==self.width*self.height): # if pv with dark field has cocrrect sizes
data = pv['value'].reshape(self.height, self.width)
self.slv.set_dark(data)
print('Norm dark', np.linalg.norm(data))
log.error('new dark fields acquired')
def add_flat(self, pv):
"""PV monitoring function for reading new flat fields from manually running pv server
on the detector machine"""
if(self.stream_is_running and len(pv['value'])==self.width*self.height): # if pv with flat has correct sizes
data = pv['value'].reshape(self.height, self.width)
self.slv.set_flat(data)
print('Norm flat', np.linalg.norm(data))
log.error('new flat fields acquired')
def begin_stream(self):
"""Run streaming reconstruction by sending new incoming projections from the queue to the solver class,
and broadcasting the reconstruction result to a pv variable
"""
self.reinit_monitors()
self.epics_pvs['ReconStatus'].put('Running')
while(self.stream_is_running):
# take parameters from the GUI
center = self.epics_pvs['Center'].get()
idx = self.epics_pvs['OrthoX'].get()
idy = self.epics_pvs['OrthoY'].get()
idz = self.epics_pvs['OrthoZ'].get()
rotx = self.epics_pvs['RotX'].get()
roty = self.epics_pvs['RotY'].get()
rotz = self.epics_pvs['RotZ'].get()
fbpfilter = self.epics_pvs['FilterType'].get(as_string=True)
dezinger = self.epics_pvs['Dezinger'].get(as_string=False)
# take items from the queue
nitem = 0
while ((not self.data_queue.empty()) and (nitem < self.buffer_size)):
item = self.data_queue.get()
# reinit if data sizes were updated (e.g. after data binning by ROI1)
if(len(item['projection'])!=self.width*self.height):
self.reinit_monitors()
self.proj_buffer[nitem] = item['projection']
self.theta_buffer[nitem] = item['theta']
self.ids_buffer[nitem] = item['id']
nitem += 1
if(nitem == 0):
continue
# log.info('center %s: idx, idy, idz: %s %s %s, rotx, roty, rotz: %s %s %s, filter: %s, dezinger: %s', center, idx, idy, idz, rotx, roty, rotz, fbpfilter, dezinger)
# reconstruct on GPU
util.tic()
# log.info("DATA SHAPE: %s"%str(self.proj_buffer[:nitem].shape))
rec = self.slv.recon_optimized(
self.proj_buffer[:nitem], self.theta_buffer[:nitem], self.ids_buffer[:nitem], center, idx, idy, idz, rotx, roty, rotz, fbpfilter, dezinger)
self.epics_pvs['ReconTime'].put(util.toc())
self.epics_pvs['BufferSize'].put(f'{nitem}/{self.buffer_size}')
# write result to pv
self.pv_rec['value'] = ({'floatValue': rec.flatten()},)
self.epics_pvs['StartRecon'].put('Done')
self.epics_pvs['ReconStatus'].put('Stopped')
def abort_stream(self):
"""Aborts streaming that is running.
"""
self.epics_pvs['ReconStatus'].put('Aborting reconstruction')
if(self.slv is not None):
self.slv.free()
self.stream_is_running = False
def read_pv_file(self, pv_file_name, macros):
"""Reads a file containing a list of EPICS PVs to be used by TomoScan.
Parameters
----------
pv_file_name : str
Name of the file to read
macros: dict
Dictionary of macro substitution to perform when reading the file
"""
pv_file = open(pv_file_name)
lines = pv_file.read()
pv_file.close()
lines = lines.splitlines()
for line in lines:
is_config_pv = True
if line.find('#controlPV') != -1:
line = line.replace('#controlPV', '')
is_config_pv = False
line = line.lstrip()
# Skip lines starting with #
if line.startswith('#'):
continue
# Skip blank lines
if line == '':
continue
pvname = line
# Do macro substitution on the pvName
for key in macros:
pvname = pvname.replace(key, macros[key])
# Replace macros in dictionary key with nothing
dictentry = line
for key in macros:
dictentry = dictentry.replace(key, '')
epics_pv = PV(pvname)
if is_config_pv:
self.config_pvs[dictentry] = epics_pv
else:
self.control_pvs[dictentry] = epics_pv
# if dictentry.find('PVAPName') != -1:
# pvname = epics_pv.value
# key = dictentry.replace('PVAPName', '')
# self.control_pvs[key] = PV(pvname)
if dictentry.find('PVName') != -1:
pvname = epics_pv.value
key = dictentry.replace('PVName', '')
self.control_pvs[key] = PV(pvname)
if dictentry.find('PVPrefix') != -1:
pvprefix = epics_pv.value
key = dictentry.replace('PVPrefix', '')
self.pv_prefixes[key] = pvprefix
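# Illustrative sketch (not part of the original file): read_pv_file() above expects a
# plain-text file of PV names, one per line, with '#' comments, an optional '#controlPV'
# marker for control-only PVs, and macro placeholders that get substituted. The names
# and macros below are hypothetical examples, not PVs defined by this module.
#
#   # example_pvs.txt, read with macros={'$(P)': '2bma:', '$(R)': 'TomoStream:'}
#   $(P)$(R)Center                    # becomes config PV 'Center' -> 2bma:TomoStream:Center
#   $(P)$(R)FilterType
#   #controlPV $(P)$(R)ReconStatus    # stored in control_pvs instead of config_pvs
#
# An entry whose dictionary key ends in 'PVName' is dereferenced: its value is itself
# treated as a PV name; a key ending in 'PVPrefix' fills self.pv_prefixes instead.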
def show_pvs(self):
"""Prints the current values of all EPICS PVs in use.
The values are printed in three sections:
- config_pvs : The PVs that are part of the scan configuration and
are saved by save_configuration()
- control_pvs : The PVs that are used for EPICS control and status,
but are not saved by save_configuration()
- pv_prefixes : The prefixes for PVs that are used for the areaDetector camera,
file plugin, etc.
"""
print('configPVS:')
for config_pv in self.config_pvs:
print(config_pv, ':', self.config_pvs[config_pv].get(as_string=True))
print('')
print('controlPVS:')
for control_pv in self.control_pvs:
print(control_pv, ':', self.control_pvs[control_pv].get(as_string=True))
print('')
print('pv_prefixes:')
for pv_prefix in self.pv_prefixes:
print(pv_prefix, ':', self.pv_prefixes[pv_prefix])
|
store.py | import datetime
import json
import threading
import uuid
from collections import defaultdict
from copy import deepcopy
from dictdiffer import diff
from inspect import signature
from threading import Lock
from pathlib import Path
from tzlocal import get_localzone
from .logger import logger
from .settings import CACHE_DIR
from .utils import extract_id
class MissingClass(object):
def __bool__(self):
return False
Missing = MissingClass()
class Callback(object):
def __init__(
self, callback, record, callback_id=None, extra_kwargs={}, watch_children=True
):
self.callback = callback
self.record = record
self.callback_id = callback_id or str(uuid.uuid4())
self.extra_kwargs = extra_kwargs
def __call__(self, difference, old_val, new_val):
kwargs = {}
kwargs.update(self.extra_kwargs)
kwargs["record"] = self.record
kwargs["callback_id"] = self.callback_id
kwargs["difference"] = difference
kwargs["changes"] = self.record._convert_diff_to_changelist(
difference, old_val, new_val
)
logger.debug("Firing callback {} with kwargs: {}".format(self.callback, kwargs))
# trim down the parameters we'll be passing, to include only those the callback will accept
params = signature(self.callback).parameters
if not any(["**" in str(param) for param in params.values()]):
# there's no "**kwargs" in the callback signature, so remove any unaccepted params
for arg in list(kwargs.keys()):
if arg not in params:
del kwargs[arg]
# perform the callback, gracefully handling any exceptions
try:
# trigger the callback within its own thread, so it won't block others if it's long-running
threading.Thread(target=self.callback, kwargs=kwargs, daemon=True).start()
except Exception as e:
logger.error(
"Error while processing callback for {}: {}".format(
repr(self.record), repr(e)
)
)
def __eq__(self, val):
if isinstance(val, str):
return self.callback_id.startswith(val)
elif isinstance(val, Callback):
return self.callback_id == val.callback_id
else:
return False
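# Illustrative sketch (not part of the original file): because __call__ inspects the
# callback's signature, a handler only needs to declare the keyword arguments it cares
# about. Both of these hypothetical handlers would work with the Callback class above:
#
#   def on_change(record, difference):           # receives only these two kwargs
#       print(record.id, difference)
#
#   def on_change_verbose(**kwargs):              # '**kwargs' receives everything
#       print(kwargs["changes"], kwargs["callback_id"])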
class RecordStore(object):
def __init__(self, client, cache_key=None):
self._mutex = Lock()
self._client = client
self._cache_key = cache_key
self._values = defaultdict(lambda: defaultdict(dict))
self._role = defaultdict(lambda: defaultdict(str))
self._collection_row_ids = {}
self._callbacks = defaultdict(lambda: defaultdict(list))
self._records_to_refresh = {}
self._pages_to_refresh = []
with self._mutex:
self._load_cache()
def _get(self, table, id):
return self._values[table].get(id, Missing)
def add_callback(self, record, callback, callback_id=None, extra_kwargs={}):
assert callable(
callback
), "The callback must be a 'callable' object, such as a function."
self.remove_callbacks(record._table, record.id, callback_id)
callback_obj = Callback(
callback, record, callback_id=callback_id, extra_kwargs=extra_kwargs
)
self._callbacks[record._table][record.id].append(callback_obj)
return callback_obj
def remove_callbacks(self, table, id, callback_or_callback_id_prefix=""):
"""
Remove all callbacks for the record specified by `table` and `id` that have a callback_id
starting with the string `callback_or_callback_id_prefix`, or are equal to the provided callback.
"""
if callback_or_callback_id_prefix is None:
return
callbacks = self._callbacks[table][id]
while callback_or_callback_id_prefix in callbacks:
callbacks.remove(callback_or_callback_id_prefix)
def _get_cache_path(self, attribute):
return str(
Path(CACHE_DIR).joinpath("{}{}.json".format(self._cache_key, attribute))
)
def _load_cache(self, attributes=("_values", "_role", "_collection_row_ids")):
if not self._cache_key:
return
for attr in attributes:
try:
with open(self._get_cache_path(attr)) as f:
if attr == "_collection_row_ids":
self._collection_row_ids.update(json.load(f))
else:
for k, v in json.load(f).items():
getattr(self, attr)[k].update(v)
except (FileNotFoundError, ValueError):
pass
def set_collection_rows(self, collection_id, row_ids):
if collection_id in self._collection_row_ids:
old_ids = set(self._collection_row_ids[collection_id])
new_ids = set(row_ids)
added = new_ids - old_ids
removed = old_ids - new_ids
for id in added:
self._trigger_callbacks(
"collection",
collection_id,
[("row_added", "rows", id)],
old_ids,
new_ids,
)
for id in removed:
self._trigger_callbacks(
"collection",
collection_id,
[("row_removed", "rows", id)],
old_ids,
new_ids,
)
self._collection_row_ids[collection_id] = row_ids
self._save_cache("_collection_row_ids")
def get_collection_rows(self, collection_id):
return self._collection_row_ids.get(collection_id, [])
def _save_cache(self, attribute):
if not self._cache_key:
return
with open(self._get_cache_path(attribute), "w") as f:
json.dump(getattr(self, attribute), f)
def _trigger_callbacks(self, table, id, difference, old_val, new_val):
for callback_obj in self._callbacks[table][id]:
callback_obj(difference, old_val, new_val)
def get_role(self, table, id, force_refresh=False):
self.get(table, id, force_refresh=force_refresh)
return self._role[table].get(id, None)
def get(self, table, id, force_refresh=False, limit=100):
id = extract_id(id)
# look up the record in the current local dataset
result = self._get(table, id)
# if it's not found, try refreshing the record from the server
if result is Missing or force_refresh:
if table == "block":
self.call_load_page_chunk(id,limit=limit)
else:
self.call_get_record_values(**{table: id})
result = self._get(table, id)
return result if result is not Missing else None
def _update_record(self, table, id, value=None, role=None):
callback_queue = []
with self._mutex:
if role:
logger.debug("Updating 'role' for {}/{} to {}".format(table, id, role))
self._role[table][id] = role
self._save_cache("_role")
if value:
logger.debug(
"Updating 'value' for {}/{} to {}".format(table, id, value)
)
old_val = self._values[table][id]
difference = list(
diff(
old_val,
value,
ignore=["version", "last_edited_time", "last_edited_by"],
expand=True,
)
)
self._values[table][id] = value
self._save_cache("_values")
if old_val and difference:
logger.debug("Value changed! Difference: {}".format(difference))
callback_queue.append((table, id, difference, old_val, value))
# run callbacks outside the mutex to avoid lockups
for cb in callback_queue:
self._trigger_callbacks(*cb)
def call_get_record_values(self, **kwargs):
"""
Call the server's getRecordValues endpoint to update the local record store. The keyword arguments map
table names into lists of (or singular) record IDs to load for that table. Use True to refresh all known
records for that table.
"""
requestlist = []
for table, ids in kwargs.items():
# ensure "ids" is a proper list
if ids is True:
ids = list(self._values.get(table, {}).keys())
if isinstance(ids, str):
ids = [ids]
# if we're in a transaction, add the requested IDs to a queue to refresh when the transaction completes
if self._client.in_transaction():
self._records_to_refresh[table] = list(
set(self._records_to_refresh.get(table, []) + ids)
)
continue
requestlist += [{"table": table, "id": extract_id(id)} for id in ids]
if requestlist:
logger.debug(
"Calling 'getRecordValues' endpoint for requests: {}".format(
requestlist
)
)
results = self._client.post(
"getRecordValues", {"requests": requestlist}
).json()["results"]
for request, result in zip(requestlist, results):
self._update_record(
request["table"],
request["id"],
value=result.get("value"),
role=result.get("role"),
)
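# Illustrative sketch (not part of the original file): hypothetical calls matching the
# docstring above; block_id, id1, id2 and space_id are placeholder identifiers.
#
#   store.call_get_record_values(block=block_id)                    # a single record
#   store.call_get_record_values(block=[id1, id2], space=space_id)  # several IDs / several tables
#   store.call_get_record_values(collection=True)                   # refresh all cached 'collection' records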
def get_current_version(self, table, id):
values = self._get(table, id)
if values and "version" in values:
return values["version"]
else:
return -1
def call_load_page_chunk(self, page_id, limit=100):
if self._client.in_transaction():
self._pages_to_refresh.append(page_id)
return
data = {
"pageId": page_id,
"limit": limit,
"cursor": {"stack": []},
"chunkNumber": 0,
"verticalColumns": False,
}
recordmap = self._client.post("loadPageChunk", data).json()["recordMap"]
self.store_recordmap(recordmap)
def store_recordmap(self, recordmap):
for table, records in recordmap.items():
if not isinstance(records, dict):
continue
for id, record in records.items():
if not isinstance(record, dict):
continue
self._update_record(
table, id, value=record.get("value"), role=record.get("role")
)
def call_query_collection(
self,
collection_id,
collection_view_id,
search="",
type="table",
aggregate=[],
aggregations=[],
filter={},
sort=[],
calendar_by="",
group_by="",
limit=1000000
):
assert not (
aggregate and aggregations
), "Use only one of `aggregate` or `aggregations` (old vs new format)"
# convert singletons into lists if needed
if isinstance(aggregate, dict):
aggregate = [aggregate]
if isinstance(sort, dict):
sort = [sort]
data = {
"collection": {"id": collection_id, "spaceId": "222a7de6-212c-4d96-b462-f0d2f6698ea0"},
"collectionView": {"id": collection_view_id, "spaceId": "222a7de6-212c-4d96-b462-f0d2f6698ea0"},
"loader": {
"loadContentCover": True,
"searchQuery": search,
"userLocale": "en",
"userTimeZone": str(get_localzone()),
"type": "reducer",
"reducers": {
"collection_group_results": {"type": "results", "limit": limit},
"table:uncategorized:title:count": {
"type": "aggregation",
"aggregation": {"property": "title", "aggregator": "count"},
},
},
},
"query": {
"aggregate": aggregate,
"aggregations": aggregations,
"filter": filter,
"sort": sort,
},
}
response = self._client.post("queryCollection", data).json()
self.store_recordmap(response["recordMap"])
return response["result"]
def handle_post_transaction_refreshing(self):
for block_id in self._pages_to_refresh:
self.call_load_page_chunk(block_id)
self._pages_to_refresh = []
self.call_get_record_values(**self._records_to_refresh)
self._records_to_refresh = {}
def run_local_operations(self, operations):
"""
Called to simulate the results of running the operations on the server, to keep the record store in sync
even when we haven't completed a refresh (or we did a refresh but the database hadn't actually updated yet...)
"""
for operation in operations:
self.run_local_operation(**operation)
def run_local_operation(self, table, id, path, command, args):
with self._mutex:
path = deepcopy(path)
new_val = deepcopy(self._values[table][id])
ref = new_val
# loop and descend down the path until it's consumed, or if we're doing a "set", there's one key left
while (len(path) > 1) or (path and command != "set"):
comp = path.pop(0)
if comp not in ref:
ref[comp] = [] if "list" in command else {}
ref = ref[comp]
if command == "update":
assert isinstance(ref, dict)
ref.update(args)
elif command == "set":
assert isinstance(ref, dict)
if path:
ref[path[0]] = args
else:
# this is the case of "setting the top level" (i.e. creating a record)
ref.clear()
ref.update(args)
elif command == "listAfter":
assert isinstance(ref, list)
if "after" in args:
ref.insert(ref.index(args["after"]) + 1, args["id"])
else:
ref.append(args["id"])
elif command == "listBefore":
assert isinstance(ref, list)
if "before" in args:
ref.insert(ref.index(args["before"]), args["id"])
else:
ref.insert(0, args["id"])
elif command == "listRemove":
try:
ref.remove(args["id"])
except ValueError:
pass
self._update_record(table, id, value=new_val)
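# Illustrative sketch (not part of the original file): a hypothetical operation in the
# shape run_local_operations() expects, mirroring what would be submitted to the server.
#
#   ops = [{
#       "table": "block",
#       "id": block_id,                          # hypothetical record id
#       "path": ["properties", "title"],
#       "command": "set",
#       "args": [["Hello world"]],
#   }]
#   store.run_local_operations(ops)              # applies the same change to the local cache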
|
ngrok.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#**
#
#########
# trape #
#########
#
# trape depends of this file
# For full copyright information this visit: https://github.com/jofpin/trape
#
# Copyright 2018 by Jose Pino (@jofpin) / <jofpin@gmail.com>
#**
import sys
import os, platform
import subprocess
import socket
import os.path as path
from multiprocessing import Process
class ngrok(object):
def __init__(self, authtoken, port, nT, hash):
if authtoken:
self.token = authtoken
else:
print("Can't use Ngrok without a valid token")
system_type = os.name
system_name = platform.system()
system_architecture = platform.architecture()[0]
str_ngrok = './ngrok'
if "nt" in system_type:
str_ngrok = './ngrok.exe'
if path.exists(str_ngrok):
pass
else:
import urllib.request, urllib.error, urllib.parse
if "posix" in system_type:
if "arwin" in system_name:
if "64" in system_architecture:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-darwin-amd64.zip"
else:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-darwin-386.zip"
else:
if "64" in system_architecture:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip"
else:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-386.zip"
elif "nt" in system_type:
if "64" in system_architecture:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-windows-amd64.zip"
else:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-windows-386.zip"
else:
sys.exit(0)
filename = "ngrok.zip"
download = urllib.request.urlopen(download_link)
saved_file=open(filename,"b+w")
saved_file.write(download.read())
saved_file.close()
result = subprocess.check_output(["unzip", filename])
os.remove(filename)
subprocess.check_output([str_ngrok, "authtoken", authtoken])
if nT > 0:
pNg = Process(target=start_ngrok, args=(str(port), hash, 1))
pNg.start()
def start_ngrok(port, hash, f=0):
if f != 0:
str_ngrok = './ngrok'
system_type = os.name
if "nt" in system_type:
str_ngrok = './ngrok.exe'
result = subprocess.check_output([str_ngrok, "http", port])
print(result)
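# Illustrative sketch (not part of the original file): a hypothetical invocation of the
# class above; the token and hash values are placeholders.
#
#   ngrok(authtoken="<your-ngrok-token>", port=8080, nT=1, hash="abc123")
#   # downloads ./ngrok if missing, registers the token, then spawns 'ngrok http 8080'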
|
sphero.py | #!/usr/bin/python3
import sys
import time
import binascii
import os
import threading
import bluepy
import yaml
from .util import *
#should it be in a different format?
RobotControlService = "22bb746f2ba075542d6f726568705327"
BLEService = "22bb746f2bb075542d6f726568705327"
AntiDosCharacteristic = "22bb746f2bbd75542d6f726568705327"
TXPowerCharacteristic = "22bb746f2bb275542d6f726568705327"
WakeCharacteristic = "22bb746f2bbf75542d6f726568705327"
ResponseCharacteristic = "22bb746f2ba675542d6f726568705327"
CommandsCharacteristic = "22bb746f2ba175542d6f726568705327"
# class DATA_MASK_LIST(object):
# IMU_PITCH = bytes.fromhex("0004 0000")
# IMU_ROLL = bytes.fromhex("0002 0000")
# IMU_YAW = bytes.fromhex("0001 0000")
# ACCEL_X = bytes.fromhex("0000 8000")
# ACCEL_Y = bytes.fromhex("0000 4000")
# ACCEL_Z = bytes.fromhex("0000 2000")
# GYRO_X = bytes.fromhex("0000 1000")
# GYRO_Y = bytes.fromhex("0000 0800")
# GYRO_Z = bytes.fromhex("0000 0400")
class DelegateObj(bluepy.btle.DefaultDelegate):
"""
Delegate object that gets called when there is a notification
"""
def __init__(self, sphero_obj,lock):
bluepy.btle.DefaultDelegate.__init__(self)
self._sphero_obj = sphero_obj
self._callback_dict = {}
self._wait_list = {}
self._data_group_callback = {}
self._enabled_group = []
self._buffer_bytes = b''
self._notification_lock = lock
def register_callback(self, seq, callback):
self._callback_dict[seq] = callback
def register_async_callback(self, group_name, callback):
self._data_group_callback[group_name] = callback
self._enabled_group = list(set(self._enabled_group) | set([group_name]))
def handle_callbacks(self, packet):
#unregister callback
callback = self._callback_dict.pop(packet[3])
MRSP = packet[2]
dlen = (packet[4] - 1)
data = []
if(dlen > 0):
data = packet[5:5+dlen]
#parse the packet
callback(MRSP, data)
def wait_for_resp(self,seq,timeout=None):
#this is a dangerous function, it waits for a response in the handle notification part
self._wait_list[seq] = None;
while(self._wait_list[seq] == None):
#time.sleep(0.1)
with self._notification_lock:
self._sphero_obj._device.waitForNotifications(0.05)
return self._wait_list.pop(seq)
def wait_for_sim_response(self, seq, timeout=None):
#this is a dangerous function, it waits for a response in the handle notification part
self._wait_list[seq] = None;
while(self._wait_list[seq] == None):
#time.sleep(0.1)
with self._notification_lock:
self._sphero_obj._device.waitForNotifications(0.05)
data = self._wait_list.pop(seq)
return (len(data) == 6 and data[0] == 255)
def parse_single_pack(self, data):
if(data[1] == 255):
#get the sequence number and check if a callback is assigned
if(data[3] in self._callback_dict):
self.handle_callbacks(data)
#check if we have it in the wait list
elif(data[3] in self._wait_list):
self._wait_list[data[3]] = data
#simple response
elif(len(data) == 6 and data[0] == 255 and data[2] == 0):
pass
#print("receive simple response for seq:{}".format(data[3]))
else:
print("unknown response:{}".format(data))
#Sync Message
elif(data[1] == 254):
##print("receive async")
#Async Message
if(data[2] == int.from_bytes(b'\x03','big')):
#the message is sensor data streaming
#get the number of bytes
data_length = int.from_bytes(data[3:5],'big') - 1#minus one for the checksum_val
index = 5 #where the data starts
#the order is same as the mask list
mask_list = self._sphero_obj._mask_list
for i,info in enumerate(mask_list):
group_key = info["name"]
#check if we enable the group
if(group_key in self._enabled_group):
group_info = info["values"]
info = {}
for i,value in enumerate(group_info):
end_index = index + 2
#it's a 16bit value
info[value["name"]] = int.from_bytes(data[index:end_index],'big',signed=True)
index = end_index
#now we pass the info to the callback
# might think about splitting this into a different thread
if group_key in self._data_group_callback:
self._data_group_callback[group_key](info)
elif(data[2] == int.from_bytes(b'\x09','big')):
#orbbasic error message:
print("orbBasic Error Message:")
print(data[2:])
elif(data[2] == int.from_bytes(b'\x0A','big')):
print(data[2:])
else:
print("unknown async response:{}".format(data))
else:
pass
def handleNotification(self, cHandle, data):
#merge the data with previous incomplete instance
self._buffer_bytes = self._buffer_bytes + data
#loop through it and see if it's valid
while(len(self._buffer_bytes) > 0):
#split the data until it's a valid chunk
index = 1
max_size = len(self._buffer_bytes)
data_single = self._buffer_bytes[:index]
while (not package_validator(data_single) and index <= max_size):
index += 1
data_single = self._buffer_bytes[:index]
if(index >= max_size):
#this means the whole buffer is the message,
#which could be either valid or invalid
if(package_validator(data_single)):
#this means the data is valid
self._buffer_bytes = b'' #clear the buffer
else:
#this means the data is not valid yet;
#keep the existing data in the buffer
break #because we don't have enough data to parse anything
#resize the new buffer
self._buffer_bytes = self._buffer_bytes[index:]
#now we parse a single instant
self.parse_single_pack(data_single)
class Sphero(object):
RAW_MOTOR_MODE_OFF = "00"
RAW_MOTOR_MODE_FORWARD = "01"
RAW_MOTOR_MODE_REVERSE = "02"
RAW_MOTOR_MODE_BRAKE = "03"
RAW_MOTOR_MODE_IGNORE = "04"
def __init__(self, addr=None):
if(addr == None):
#search for sphero
sphero_list = search_for_sphero()
if(len(sphero_list) == 0):
raise "No Sphero Found in Vicinity"
addr = sphero_list[0]
self._addr = addr
self._connected = False
self._seq_counter = 0
self._stream_rate = 10
#load the mask list
with open(os.path.join(os.path.dirname(__file__),'data','mask_list.yaml'),'r') as mask_file:
self._mask_list = yaml.safe_load(mask_file)
self._curr_data_mask = bytes.fromhex("0000 0000")
self._notification_lock = threading.RLock()
#start a listener loop
def connect(self):
"""
Connects the sphero with the address given in the constructor
"""
self._device = bluepy.btle.Peripheral(self._addr, addrType=bluepy.btle.ADDR_TYPE_RANDOM)
self._notifier = DelegateObj(self, self._notification_lock)
#set notifier to be notified
self._device.withDelegate(self._notifier)
self._devModeOn()
self._connected = True #Might need to change to be a callback format
#get the command service
cmd_service = self._device.getServiceByUUID(RobotControlService)
self._cmd_characteristics = {}
characteristic_list = cmd_service.getCharacteristics()
for characteristic in characteristic_list:
uuid_str = binascii.b2a_hex(characteristic.uuid.binVal).decode('utf-8')
self._cmd_characteristics[uuid_str] = characteristic
self._listening_flag = True
self._listening_thread = threading.Thread(target=self._listening_loop)
self._listening_thread.start()
def _devModeOn(self):
"""
A sequence of read/write that enables the developer mode
"""
service = self._device.getServiceByUUID(BLEService)
characteristic_list = service.getCharacteristics()
#make it into a dict
characteristic_dict = {}
for characteristic in characteristic_list:
uuid_str = binascii.b2a_hex(characteristic.uuid.binVal).decode('utf-8')
characteristic_dict[uuid_str] = characteristic
characteristic = characteristic_dict[AntiDosCharacteristic]
characteristic.write("011i3".encode(),True)
characteristic = characteristic_dict[TXPowerCharacteristic]
characteristic.write((7).to_bytes(1, 'big'),True)
characteristic = characteristic_dict[WakeCharacteristic]
characteristic.write((1).to_bytes(1, 'big'),True)
def command(self, cmd, data, resp=True):
"""
cmd - (str) Hex String that is the command's code(ff, no need to put \\x in front)
data - [bytes/str/int] an array of values with what to send. We will reformat int and string
resp - (bool) whether the command will only return after we get an acknowledgement from Sphero. If set to false, sphero will be set to NOT even send a response to save bandwidth
-----
return - (tuple) A tuple with the first element being sequence number and second element being the response if blocked, None if not
"""
#format data
data_list = self._format_data_array(data)
#set the sop2 based on the blocking command
sop2 = "ff" if resp else "fe"
#send command
seq_num = self._send_command(sop2, "02", cmd, data_list)
#check if blocking
if(resp):
resp = self._notifier.wait_for_resp(seq_num)
#return the sequence number and response
return (seq_num, resp)
else:
return (seq_num, None)
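# Illustrative sketch (not part of the original file): hypothetical direct use of
# command() with the raw RGB LED command ("20"), equivalent to set_rgb_led() below.
#
#   seq, resp = sphero.command("20", [255, 0, 0, 0], resp=True)   # red LED, wait for ack
#   seq, _ = sphero.command("20", [0, 255, 0, 0], resp=False)     # green LED, fire-and-forget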
def _send_command(self,sop2,did,cid,data_list):
sop1 = binascii.a2b_hex("ff")
sop2 = binascii.a2b_hex(sop2)
did = binascii.a2b_hex(did)
cid = binascii.a2b_hex(cid)
seq_val = self._get_sequence()
seq = seq_val.to_bytes(1,"big")
dlen = (count_data_size(data_list)+1).to_bytes(1,"big")#add one for checksum
packet = [sop1,sop2,did,cid,seq,dlen] + data_list
packet += [cal_packet_checksum(packet[2:]).to_bytes(1,'big')] #calculate the checksum
#write the command to Sphero
#print("cmd:{} packet:{}".format(cid, b"".join(packet)))
with self._notification_lock:
self._cmd_characteristics[CommandsCharacteristic].write(b"".join(packet))
return seq_val
# def sleep(self, timeout):
# """
# Sleep function that allows the notifications to be fired
# """
# startTime = time.time()
# while(time.time() - startTime <= timeout):
# #time.sleep(1)
# #self._device.waitForNotifications(1)
def _listening_loop(self):
pass
#while(self._listening_flag):
#with self._notification_lock:
#self._device.waitForNotifications(0.001)
def _get_sequence(self):
val = self._seq_counter
self._seq_counter += 1
self._seq_counter = self._seq_counter%256
return val
def _format_data_array(self, arr):
"""
helper function that converts int or string entries to bytes, to reduce duplicated code
"""
if isinstance(arr,list):
for i,value in enumerate(arr):
if isinstance(value, str):
arr[i] = binascii.a2b_hex(value)
elif isinstance(value, int):
arr[i] = value.to_bytes(1,'big')
return arr
""" CORE functionality """
def ping(self):
return self._send_command("ff","00","01",[])
def version(self):
#NOTE returning weird data not sure what's wrong
seq_num = self._send_command("ff","00","02",[])
response = self._notifier.wait_for_resp(seq_num)
data_response = response[5:-1]
version_data = {}
version_data["RECV"] = hex(data_response[0])
version_data["MDL"] = hex(data_response[1])
version_data["HW"] = data_response[2]
version_data["MSA-ver"] = data_response[3]
version_data["MSA-rev"] = data_response[4]
version_data["BL"] = hex(data_response[5])
return version_data
def get_device_name(self):
seq_num = self._send_command("ff","00","11",[])
response = self._notifier.wait_for_resp(seq_num)
name_data = {}
name_data["name"] = str(response[5:21],'utf-8').rstrip(' \t\r\n\0')
name_data["bta"] = str(response[21:33],'utf-8')
name_data["color"] = str(response[33:36],'utf-8')
return name_data
""" Sphero functionality """
def roll(self, speed, heading, resp=False):
"""
Roll the ball towards the heading
speed - (int) speed
heading - (int) which direction, 0 - 359
resp - (bool) whether the code will wait for confirmation from Sphero
"""
heading_bytes = heading.to_bytes(2,byteorder='big')
data = [speed,heading_bytes[0],heading_bytes[1], 1]
#send command
self.command('30',data, resp=resp)
def boost(self):
raise NotImplementedError
def set_heading(self, heading, resp=False):
"""
change the heading of the robot
heading - (int) heading in the range of 0-359
resp - (bool) whether to receive a confirmation response from Sphero
"""
heading_bytes = heading.to_bytes(2,byteorder='big')
data = [heading_bytes[0],heading_bytes[1]]
#send command
self._command("01",data, resp=resp)
def set_rgb_led(self, red, green, blue, resp=False):
"""
Set the color of Sphero's LED
red - (int) Color of red in range 0-255
green - (int) Color of green in range 0-255
blue - (int) Color of blue in range 0-255
resp - (bool) whether the code will wait for confirmation from Sphero
"""
#set data
data = [red, green, blue, 0]
#send command
self.command("20", data, resp=resp)
def get_rgb_led(self):
"""
Get the color of Sphero's LED
----
return - tuple of the color in RGB
"""
#set the correct command
(seq_num, resp) = self.command("22",[])
#parse the response packet and make sure it's correct
if resp and resp[4] == 4:
MRSP = resp[2]
red = resp[5]
green = resp[6]
blue = resp[7]
return (red, green, blue)
else:
return None
def _handle_mask(self,group_name, remove=False):
if(remove):
optr = XOR_mask
else:
optr = OR_mask
for i,group in enumerate(self._mask_list):
if(group["name"] == group_name):
for i, value in enumerate(group["values"]):
self._curr_data_mask = optr(self._curr_data_mask, bytes.fromhex(value["mask"]))
def _start_data_stream(self, group_name,rate):
## '\xff\xff\x02\x11\x01\x0e\x00(\x00\x01\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x98'
#handle mask
self._handle_mask(group_name)
#send the mask as data
self._stream_rate = rate
self._send_data_command(rate,self._curr_data_mask,(0).to_bytes(4,'big'))
def _send_data_command(self,rate,mask1,mask2,sample=1):
N = int(400/rate).to_bytes(2,byteorder='big')
#N = (40).to_bytes(2,byteorder='big')
M = (sample).to_bytes(2,byteorder='big')
PCNT = (0).to_bytes(1,'big')
#MASK2 = (mask2).to_bytes(4,'big')
data = [N,M, mask1 ,PCNT,mask2]
self.command("11",data, resp=True) #make sure sphero actully receive this
def _stop_data_stream(self, group_name):
#handle mask
self._handle_mask(group_name,remove=True)
self._send_data_command(self._stream_rate,self._curr_data_mask,(0).to_bytes(4,'big'))
def start_gyro_callback(self,rate,callback):
"""
Set a gyro callback that streams the data to the callback
callback - (function) function that the streamed data will be passed to
"""
name = "Gyro"
#first we register the callback with the notifier
self._notifier.register_async_callback(name,callback)
#start data stream
self._start_data_stream(name,rate)
def start_accel_callback(self,rate,callback):
"""
Set an accelerometer callback that streams the data to the callback
callback - (function) function that the streamed data will be passed to
"""
name = "Accel"
#first we register the callback with the notifier
self._notifier.register_async_callback(name,callback)
#start data stream
self._start_data_stream(name,rate)
def start_IMU_callback(self,rate,callback):
"""
Set an IMU callback that streams the data to the callback
callback - (function) function that the streamed data will be passed to
"""
name = "IMU"
#first we register the callback with the notifier
self._notifier.register_async_callback(name,callback)
#start data stream
self._start_data_stream(name,rate)
def stop_gyro_callback(self):
self._stop_data_stream("Gyro")
def stop_accel_callback(self):
self._stop_data_stream("Accel")
def stop_IMU_callback(self):
self._stop_data_stream("IMU")
def set_stabilization(self,bool_flag, resp=False):
"""
Enable/Disable stabilization of Sphero
bool_flag - (bool) stabilization on/off
resp[Optional] - (bool) whether the code will wait for confirmation from Sphero, default to False
"""
data = ["01" if bool_flag else "00"]
self.command("02",data, resp=resp)
def set_raw_motor_values(self,lmode,lpower,rmode,rpower, resp=False):
"""
Set the raw motor values of Sphero
lmode - (str) the hex string(without \\x) of the mode
lpower - (int) the value of the power from 0-255
rmode - (str) the hex string(without \\x) of the mode
rpower - (int) the value of the power from 0-255
resp[Optional] - (bool) whether the code will wait for confirmation from Sphero, default to False
"""
data = [lmode, int(lpower), rmode, int(rpower)]
#By default, we're going to cancel it
self.command("33",data, resp=resp)
""" About MACRO """
def abort_macro(self, id_):
"""
Abort the current macro with the given ID
id_ - (int) the ID of the macro to stop
"""
data = [id_]
self.command("55",data)
def run_macro(self, id_):
"""
Start the macro with the given ID
id_ - (int) the 8-bit ID of the macro
"""
data = [id_]
self.command("50",data)
""" OrbBasic the programming language """
STORAGE_RAM = "00"
STORAGE_PERSISTENT = "01"
def erase_orb_basic_storage(self, area, block=True):
"""
Erase any existing program in the stored area
area - (str) hex name of the area to be cleaned
"""
data = [area]
seq = self.command("60", data)
if(block):
return self._notifier.wait_for_sim_response(seq)
else:
return True
def run_orb_basic_program(self, area, start_line):
"""
Run the orb_basic program stored in that area
area - (str) hex name of the area
start_line - (int) the decimal line number to start
"""
data = [area,start_line.to_bytes(2,byteorder='big')]
seq = self.command("62", data)
return self._notifier.wait_for_sim_response(seq)
def abort_orb_basic_program(self):
"""
Abort the orb_basic program
"""
data = []
seq = self.command("63", data)
return self._notifier.wait_for_sim_response(seq)
def append_orb_basic_fragment(self, area,val):
"""
Append the value into the orb basic storage for the given area
val - (list of strings) the command broken down into a list of hex values
area - (str) hex name of the area
"""
val.insert(0, area)
data = val
seq = self.command("61",data)
if self._notifier.wait_for_sim_response(seq):
pass
else:
print("error in appending orbbasic fragments")
def append_orb_basic_line(self, area,code):
"""
Append the line to the existing code
"""
#first convert the line into list of bytes
code_list = []
for c in code:
code_list.append(bytes(c,encoding="UTF-8"))
if len(code_list) == 0:
code_list.append(b'\x00') # NULL in the end
#code_list.append(b'\x0a')#append the terminating line
#send it to next part of the program
self.append_orb_basic_fragment(area,code_list)
|
demo.py | #!/usr/bin/env python
# coding=utf-8
import tensorflow as tf
import bottle
from bottle import route, run
import threading
import json
import numpy as np
from time import sleep
from Bert_model import BERT_model
'''
This file is taken and modified from R-Net by Minsangkim142
https://github.com/minsangkim142/R-net
'''
app = bottle.Bottle()
query = ("", "")
response = ""
@app.get("/")
def home():
with open('demo.html', 'r') as fl:
html = fl.read()
return html
@app.post('/answer')
def answer():
passage = bottle.request.json['passage']
question = bottle.request.json['question']
print("received question: {}".format(question))
# if not passage or not question:
# exit()
global query, response
query = (passage, question)
if query[1] != "" and query[0] != "":
while not response:
sleep(0.1)
else:
response = "Paragraph or question field is empty"
print("received response: {}".format(response))
response_ = {"answer": response}
response = []
return response_
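# Illustrative sketch (not part of the original file): the /answer route above expects a
# JSON body with 'passage' and 'question'. A hypothetical client call (using the requests
# library, which this file does not itself import) could look like:
#
#   import requests
#   r = requests.post("http://localhost:8000/answer",
#                     json={"passage": "Bottle is a WSGI micro-framework.",
#                           "question": "What is Bottle?"})
#   print(r.json()["answer"])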
class Demo(object):
def __init__(self, model, config):
run_event = threading.Event()
run_event.set()
self.close_thread = True
self.model = model
threading.Thread(target=self.demo_backend).start()
app.run(port=8000, host='0.0.0.0')
try:
while 1:
sleep(.1)
except KeyboardInterrupt:
print("Closing server...")
self.close_thread = False
def demo_backend(self):
global query, response
while self.close_thread:
sleep(0.1)
if query[1] != "" and query[0] != "":
print("Hello")
response = self.model.predict_example(query[0], query[1])
query = ("", "")
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='Path to bert_config.json', required=True)
parser.add_argument('-v', '--vocab', help='Path to vocab.txt', required=True)
parser.add_argument('-o', '--output', help='Directory of model outputs', required=True)
def main():
args = parser.parse_args()
AI = BERT_model(args.config, args.vocab, args.output)
demo = Demo(AI, None)
if __name__ == "__main__":
main() |
testboxtasks.py | # -*- coding: utf-8 -*-
# $Id: testboxtasks.py 70566 2018-01-12 18:25:48Z vboxsync $
"""
TestBox Script - Async Tasks.
"""
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 70566 $"
# Standard python imports.
from datetime import datetime
import os
import re
import signal;
import sys
import subprocess
import threading
import time
# Validation Kit imports.
from common import constants
from common import utils;
from common import webutils;
import testboxcommons
# Figure where we are.
try: __file__
except: __file__ = sys.argv[0];
g_ksTestScriptDir = os.path.dirname(os.path.abspath(__file__));
class TestBoxBaseTask(object):
"""
Asynchronous task employing a thread to do the actual work.
"""
## Time to wait for a task to terminate.
kcSecTerminateTimeout = 60
def __init__(self, oTestBoxScript, cSecTimeout, fnThreadProc):
self._oTestBoxScript = oTestBoxScript;
self._cSecTimeout = cSecTimeout;
self._tsSecStarted = utils.timestampSecond();
self.__oRLock = threading.RLock();
self._oCv = threading.Condition(self.__oRLock);
self._fRunning = True; # Protected by lock.
self._fShouldTerminate = False; # Protected by lock.
# Spawn the worker thread.
self._oThread = threading.Thread(target=fnThreadProc);
self._oThread.daemon = True;
self._oThread.start();
def _lock(self):
""" Take the CV lock. """
self._oCv.acquire();
def _unlock(self):
""" Release the CV lock. """
self._oCv.release();
def _complete(self):
"""
Indicate that the task is complete, waking up the main thread.
Usually called at the end of the thread procedure.
"""
self._lock();
self._fRunning = False;
self._oCv.notifyAll();
self._unlock();
def isRunning(self):
""" Check if the task is still running. """
self._lock();
fRunning = self._fRunning;
self._unlock();
return fRunning;
def wait(self, cSecTimeout):
""" Wait for the task to complete. """
self._lock();
fRunning = self._fRunning;
if fRunning is True and cSecTimeout > 0:
self._oCv.wait(cSecTimeout)
self._unlock();
return fRunning;
def terminate(self, cSecTimeout = kcSecTerminateTimeout):
""" Terminate the task. """
self._lock();
self._fShouldTerminate = True;
self._unlock();
return self.wait(cSecTimeout);
def _shouldTerminate(self):
"""
Returns True if we should terminate, False if not.
"""
self._lock();
fShouldTerminate = self._fShouldTerminate is True;
self._unlock();
return fShouldTerminate;
class TestBoxTestDriverTask(TestBoxBaseTask):
"""
Base class for tasks involving test drivers.
"""
## When to flush the backlog of log messages.
kcchMaxBackLog = 32768;
## The backlog sync time (seconds).
kcSecBackLogFlush = 30;
## The timeout for the cleanup job (5 mins).
kcSecCleanupTimeout = 300;
## The timeout to wait for the abort command before killing it.
kcSecAbortTimeout = 300;
## The timeout to wait for the final output to be processed.
kcSecFinalOutputTimeout = 180;
## The timeout to wait for the abort command output to be processed.
kcSecAbortCmdOutputTimeout = 30;
## The timeout to wait for the terminate output to be processed.
kcSecTerminateOutputTimeout = 30;
## The timeout to wait for the kill output to be processed.
kcSecKillOutputTimeout = 30;
## The timeout for talking to the test manager.
ksecTestManagerTimeout = 60;
def __init__(self, oTestBoxScript, fnThreadProc, cSecTimeout, idResult, sScriptCmdLine):
"""
Class instance init
"""
# Init our instance data.
self._idResult = idResult;
self._sScriptCmdLine = sScriptCmdLine;
self._oChild = None;
self._oBackLogLock = threading.RLock();
self._oBackLogFlushLock = threading.RLock();
self._asBackLog = [];
self._cchBackLog = 0;
self._secTsBackLogFlush = utils.timestampSecond();
# Init super.
TestBoxBaseTask.__init__(self, oTestBoxScript, cSecTimeout, fnThreadProc);
def terminate(self, cSecTimeout = kcSecCleanupTimeout):
""" Reimplement with higher default timeout. """
return TestBoxBaseTask.terminate(self, cSecTimeout);
def _logFlush(self, oGivenConnection = None):
"""
Flushes the log to the test manager.
No exceptions.
"""
fRc = True;
self._oBackLogFlushLock.acquire();
# Grab the current back log.
self._oBackLogLock.acquire();
asBackLog = self._asBackLog;
self._asBackLog = [];
self._cchBackLog = 0;
self._secTsBackLogFlush = utils.timestampSecond();
self._oBackLogLock.release();
# If there is anything to flush, flush it.
if asBackLog:
sBody = '';
for sLine in asBackLog:
sBody += sLine + '\n';
oConnection = None;
try:
if oGivenConnection is None:
oConnection = self._oTestBoxScript.openTestManagerConnection();
oConnection.postRequest(constants.tbreq.LOG_MAIN, {constants.tbreq.LOG_PARAM_BODY: sBody});
oConnection.close();
else:
oGivenConnection.postRequest(constants.tbreq.LOG_MAIN, {constants.tbreq.LOG_PARAM_BODY: sBody});
except Exception as oXcpt:
testboxcommons.log('_logFlush error: %s' % (oXcpt,));
if len(sBody) < self.kcchMaxBackLog * 4:
self._oBackLogLock.acquire();
asBackLog.extend(self._asBackLog);
self._asBackLog = asBackLog;
# Don't restore _cchBackLog as there is no point in retrying immediately.
self._oBackLogLock.release();
if oConnection is not None: # Be kind to apache.
try: oConnection.close();
except: pass;
fRc = False;
self._oBackLogFlushLock.release();
return fRc;
def flushLogOnConnection(self, oConnection):
"""
Attempts to flush the log on the given connection.
No exceptions.
"""
return self._logFlush(oConnection);
def _logInternal(self, sMessage, fPrefix = True, fFlushCheck = False):
"""
Internal logging.
Won't flush the backlog, returns a flush indicator so the caller can
do it instead.
"""
if fPrefix:
try:
oNow = datetime.utcnow();
sTs = '%02u:%02u:%02u.%06u ' % (oNow.hour, oNow.minute, oNow.second, oNow.microsecond);
except Exception as oXcpt:
sTs = 'oXcpt=%s ' % (oXcpt);
sFullMsg = sTs + sMessage;
else:
sFullMsg = sMessage;
self._oBackLogLock.acquire();
self._asBackLog.append(sFullMsg);
cchBackLog = self._cchBackLog + len(sFullMsg) + 1;
self._cchBackLog = cchBackLog;
secTsBackLogFlush = self._secTsBackLogFlush;
self._oBackLogLock.release();
testboxcommons.log(sFullMsg);
return fFlushCheck \
and ( cchBackLog >= self.kcchMaxBackLog \
or utils.timestampSecond() - secTsBackLogFlush >= self.kcSecBackLogFlush);
def _log(self, sMessage):
"""
General logging function, will flush.
"""
if self._logInternal(sMessage, fFlushCheck = True):
self._logFlush();
return True;
def _reportDone(self, sResult):
"""
Report EXEC job done to the test manager.
sResult is a value from constants.result.
"""
## @todo optimize this to use one server connection.
#
# Log it.
#
assert sResult in constants.result.g_kasValidResults;
self._log('Done %s' % (sResult,));
#
# Report it.
#
fRc = True;
secStart = utils.timestampSecond();
while True:
self._logFlush(); ## @todo Combine this with EXEC_COMPLETED.
oConnection = None;
try:
oConnection = self._oTestBoxScript.openTestManagerConnection();
oConnection.postRequest(constants.tbreq.EXEC_COMPLETED, {constants.tbreq.EXEC_COMPLETED_PARAM_RESULT: sResult});
oConnection.close();
except Exception as oXcpt:
if utils.timestampSecond() - secStart < self.ksecTestManagerTimeout:
self._log('_reportDone exception (%s) - retrying...' % (oXcpt,));
time.sleep(2);
continue;
self._log('_reportDone error: %s' % (oXcpt,));
if oConnection is not None: # Be kind to apache.
try: oConnection.close();
except: pass;
fRc = False;
break;
#
# Mark the task as completed.
#
self._complete();
return fRc;
def _assembleArguments(self, sAction, fWithInterpreter = True):
"""
Creates an argument array for subprocess.Popen, splitting the
sScriptCmdLine like bourne shell would.
fWithInterpreter is used (False) when checking that the script exists.
Returns None on bad input.
"""
#
# This is a good place to export the test set id to the environment.
#
os.environ['TESTBOX_TEST_SET_ID'] = str(self._idResult);
cTimeoutLeft = utils.timestampSecond() - self._tsSecStarted;
cTimeoutLeft = 0 if cTimeoutLeft >= self._cSecTimeout else self._cSecTimeout - cTimeoutLeft;
os.environ['TESTBOX_TIMEOUT'] = str(cTimeoutLeft);
os.environ['TESTBOX_TIMEOUT_ABS'] = str(self._tsSecStarted + self._cSecTimeout);
#
# Do replacements and split the command line into arguments.
#
if self._sScriptCmdLine.find('@ACTION@') >= 0:
sCmdLine = self._sScriptCmdLine.replace('@ACTION@', sAction);
else:
sCmdLine = self._sScriptCmdLine + ' ' + sAction;
for sVar in [ 'TESTBOX_PATH_BUILDS', 'TESTBOX_PATH_RESOURCES', 'TESTBOX_PATH_SCRATCH', 'TESTBOX_PATH_SCRIPTS',
'TESTBOX_PATH_UPLOAD', 'TESTBOX_UUID', 'TESTBOX_REPORTER', 'TESTBOX_ID', 'TESTBOX_TEST_SET_ID',
'TESTBOX_TIMEOUT', 'TESTBOX_TIMEOUT_ABS' ]:
if sCmdLine.find('${' + sVar + '}') >= 0:
sCmdLine = sCmdLine.replace('${' + sVar + '}', os.environ[sVar]);
asArgs = utils.argsSplit(sCmdLine);
#
# Massage argv[0]:
# - Convert portable slashes ('/') to the flavor preferred by the
# OS we're currently running on.
# - Run python scripts through the current python interpreter (important
# on systems that don't sport native hash-bang script execution).
#
asArgs[0] = asArgs[0].replace('/', os.path.sep);
if not os.path.isabs(asArgs[0]):
asArgs[0] = os.path.join(self._oTestBoxScript.getPathScripts(), asArgs[0]);
if asArgs[0].endswith('.py') and fWithInterpreter:
if sys.executable:
asArgs.insert(0, sys.executable);
else:
asArgs.insert(0, 'python');
return asArgs;
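# Illustrative sketch (not part of the original file): a hypothetical sScriptCmdLine and
# how _assembleArguments() above would expand it for sAction == 'cleanup-after'.
#
#   sScriptCmdLine = 'testdriver/vbox.py --scratch ${TESTBOX_PATH_SCRATCH} @ACTION@'
#   -> '@ACTION@' is replaced by 'cleanup-after', ${TESTBOX_PATH_SCRATCH} by its value
#      from os.environ, the result is split shell-style, argv[0] gets OS path separators
#      and is made absolute under the scripts path, and sys.executable is prepended
#      because the script ends in '.py'.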
def _outputThreadProc(self, oChild, oStdOut, sAction):
"""
Thread procedure for the thread that reads the output of the child
process. We use a dedicated thread for this purpose since non-blocking
I/O may be hard to keep portable according to hints around the web...
"""
oThread = oChild.oOutputThread;
while not oThread.fPleaseQuit:
# Get a line.
try:
sLine = oStdOut.readline();
except Exception as oXcpt:
self._log('child (%s) pipe I/O error: %s' % (sAction, oXcpt,));
break;
# EOF?
if not sLine:
break;
# Strip trailing new line (DOS and UNIX).
if sLine.endswith("\r\n"):
sLine = sLine[0:-2];
elif sLine.endswith("\n"):
sLine = sLine[0:-1];
# Log it.
if self._logInternal(sLine, fPrefix = False, fFlushCheck = True):
self._logFlush();
# Close the stdout pipe in case we were told to get lost.
try:
oStdOut.close();
except Exception as oXcpt:
self._log('warning: Exception closing stdout pipe of "%s" child: %s' % (sAction, oXcpt,));
# This is a bit hacky, but try to reap the child so it won't hang around
# as defunct during abort/timeout.
if oChild.poll() is None:
for _ in range(15):
time.sleep(0.2);
if oChild.poll() is not None:
break;
oChild = None;
return None;
def _spawnChild(self, sAction):
"""
Spawns the child process, returning success indicator + child object.
"""
# Argument list.
asArgs = self._assembleArguments(sAction)
if asArgs is None:
self._log('Malformed command line: "%s"' % (self._sScriptCmdLine,));
return (False, None);
# Spawn child.
try:
oChild = utils.processPopenSafe(asArgs,
shell = False,
bufsize = -1,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
cwd = self._oTestBoxScript.getPathSpill(),
universal_newlines = True,
close_fds = (False if utils.getHostOs() == 'win' else True),
preexec_fn = (None if utils.getHostOs() in ['win', 'os2']
else os.setsid)); # pylint: disable=E1101
except Exception as oXcpt:
self._log('Error creating child process %s: %s' % (asArgs, oXcpt));
return (False, None);
oChild.sTestBoxScriptAction = sAction;
# Start output thread, extending the child object to keep track of it.
oChild.oOutputThread = threading.Thread(target=self._outputThreadProc, args=(oChild, oChild.stdout, sAction))
oChild.oOutputThread.daemon = True;
oChild.oOutputThread.fPleaseQuit = False; # Our extension.
oChild.oOutputThread.start();
return (True, oChild);
def _monitorChild(self, cSecTimeout, fTryKillCommand = True, oChild = None):
"""
Monitors the child process. If the child executes longer than
cSecTimeout allows, we'll terminate it.
Returns Success indicator and constants.result value.
"""
if oChild is None:
oChild = self._oChild;
iProcGroup = oChild.pid;
if utils.getHostOs() in ['win', 'os2'] or iProcGroup <= 0:
iProcGroup = -2;
#
# Do timeout processing and check the health of the child.
#
sResult = constants.result.PASSED;
seStarted = utils.timestampSecond();
while True:
# Check status.
iRc = oChild.poll();
if iRc is not None:
self._log('Child doing "%s" completed with exit code %d' % (oChild.sTestBoxScriptAction, iRc));
oChild.oOutputThread.join(self.kcSecFinalOutputTimeout);
if oChild is self._oChild:
self._oChild = None;
if iRc == constants.rtexitcode.SKIPPED:
return (True, constants.result.SKIPPED);
if iRc != constants.rtexitcode.SUCCESS:
return (False, constants.result.FAILED);
return (True, constants.result.PASSED);
# Check for abort first, since that has less of a stigma.
if self._shouldTerminate() is True:
sResult = constants.result.ABORTED;
break;
# Check timeout.
cSecElapsed = utils.timestampSecond() - seStarted;
if cSecElapsed > cSecTimeout:
self._log('Timeout: %u secs (limit %u secs)' % (cSecElapsed, cSecTimeout));
sResult = constants.result.TIMED_OUT;
break;
# Wait.
cSecLeft = cSecTimeout - cSecElapsed;
oChild.oOutputThread.join(15 if cSecLeft > 15 else (cSecLeft + 1));
#
# If the child is still alive, try to use the abort command to stop it
# very gently. This lets the testdriver clean up daemon processes
# and such that our code below won't catch.
#
if fTryKillCommand and oChild.poll() is None:
self._log('Attempting to abort child...');
(fRc2, oAbortChild) = self._spawnChild('abort');
if oAbortChild is not None and fRc2 is True:
self._monitorChild(self.kcSecAbortTimeout, False, oAbortChild);
oAbortChild = None;
#
# If the child is still alive, try the polite way.
#
if oChild.poll() is None:
self._log('Attempting to terminate child doing "%s"...' % (oChild.sTestBoxScriptAction,));
if iProcGroup > 0:
try:
os.killpg(iProcGroup, signal.SIGTERM); # pylint: disable=E1101
except Exception as oXcpt:
self._log('killpg() failed: %s' % (oXcpt,));
try:
self._oChild.terminate();
oChild.oOutputThread.join(self.kcSecTerminateOutputTimeout);
except Exception as oXcpt:
self._log('terminate() failed: %s' % (oXcpt,));
#
# If the child doesn't respond to polite, kill it. Always do a killpg
# should there be any processes left in the group.
#
if iProcGroup > 0:
try:
os.killpg(iProcGroup, signal.SIGKILL); # pylint: disable=E1101
except Exception as oXcpt:
self._log('killpg() failed: %s' % (oXcpt,));
if oChild.poll() is None:
self._log('Attempting to kill child doing "%s"...' % (oChild.sTestBoxScriptAction,));
try:
self._oChild.kill();
oChild.oOutputThread.join(self.kcSecKillOutputTimeout);
except Exception as oXcpt:
self._log('kill() failed: %s' % (oXcpt,));
#
# Give the whole mess a couple more seconds to respond in case the
# output thread exited prematurely for some weird reason.
#
if oChild.poll() is None:
time.sleep(2);
time.sleep(2);
time.sleep(2);
iRc = oChild.poll();
if iRc is not None:
self._log('Child doing "%s" aborted with exit code %d' % (oChild.sTestBoxScriptAction, iRc));
else:
self._log('Child doing "%s" is still running, giving up...' % (oChild.sTestBoxScriptAction,));
## @todo in this case we should probably try reboot the testbox...
oChild.oOutputThread.fPleaseQuit = True;
if oChild is self._oChild:
self._oChild = None;
return (False, sResult);
def _terminateChild(self):
"""
Terminates the child forcefully.
"""
if self._oChild is not None:
pass;
def _cleanupAfter(self):
"""
Cleans up after a test failure. (On success, cleanup is implicit.)
"""
assert self._oChild is None;
#
# Tell the script to clean up.
#
if self._sScriptCmdLine: # can be empty if cleanup crashed.
(fRc, self._oChild) = self._spawnChild('cleanup-after');
if fRc is True:
(fRc, _) = self._monitorChild(self.kcSecCleanupTimeout, False);
self._terminateChild();
else:
fRc = False;
#
# Wipe the stuff clean.
#
fRc2 = self._oTestBoxScript.reinitScratch(fnLog = self._log, cRetries = 6);
return fRc and fRc2;
class TestBoxCleanupTask(TestBoxTestDriverTask):
"""
Special asynchronous task for cleaning up a stale test when starting the
testbox script. It's assumed that the reason for the stale test lies in
it causing a panic, reboot, or similar, so we'll also try to collect some
info about recent system crashes and reboots.
"""
def __init__(self, oTestBoxScript):
# Read the old state, throwing a fit if it's invalid.
sScriptState = oTestBoxScript.getPathState();
sScriptCmdLine = self._readStateFile(os.path.join(sScriptState, 'script-cmdline.txt'));
sResultId = self._readStateFile(os.path.join(sScriptState, 'result-id.txt'));
try:
idResult = int(sResultId);
if idResult <= 0 or idResult >= 0x7fffffff:
raise Exception('');
except:
raise Exception('Invalid id value "%s" found in %s' % (sResultId, os.path.join(sScriptState, 'result-id.txt')));
sTestBoxId = self._readStateFile(os.path.join(sScriptState, 'testbox-id.txt'));
try:
self.idTestBox = int(sTestBoxId);
if self.idTestBox <= 0 or self.idTestBox >= 0x7fffffff:
raise Exception('');
except:
raise Exception('Invalid id value "%s" found in %s' % (sTestBoxId, os.path.join(sScriptState, 'testbox-id.txt')));
self.sTestBoxName = self._readStateFile(os.path.join(sScriptState, 'testbox-name.txt'));
# Init super.
TestBoxTestDriverTask.__init__(self, oTestBoxScript, self._threadProc, self.kcSecCleanupTimeout,
idResult, sScriptCmdLine);
@staticmethod
def _readStateFile(sPath):
"""
Reads a state file, returning a string on success and otherwise raising
an exception.
"""
try:
oFile = open(sPath, "rb");
sStr = oFile.read();
sStr = sStr.decode('utf-8');
oFile.close();
return sStr.strip();
except Exception as oXcpt:
raise Exception('Failed to read "%s": %s' % (sPath, oXcpt));
def _threadProc(self):
"""
Perform the actual clean up on script startup.
"""
#
# First make sure we won't repeat this exercise should it turn out to
# trigger another reboot/panic/whatever.
#
sScriptCmdLine = os.path.join(self._oTestBoxScript.getPathState(), 'script-cmdline.txt');
try:
os.remove(sScriptCmdLine);
oFile = open(sScriptCmdLine, 'wb');
oFile.close();
except Exception as oXcpt:
self._log('Error truncating "%s": %s' % (sScriptCmdLine, oXcpt));
#
# Report the incident.
#
self._log('Seems we rebooted!');
self._log('script-cmdline="%s"' % (self._sScriptCmdLine));
self._log('result-id=%d' % (self._idResult));
self._log('testbox-id=%d' % (self.idTestBox));
self._log('testbox-name=%s' % (self.sTestBoxName));
self._logFlush();
# System specific info.
sOs = utils.getHostOs();
if sOs == 'darwin':
self._log('NVRAM Panic Info:\n%s\n' % (self.darwinGetPanicInfo(),));
self._logFlush();
## @todo Add some special command for reporting this situation so we get something
# useful in the event log.
#
# Do the cleaning up.
#
self._cleanupAfter();
self._reportDone(constants.result.REBOOTED);
return False;
def darwinGetPanicInfo(self):
"""
Returns a string with the aapl,panic-info content.
"""
# Retrieve the info.
try:
sRawInfo = utils.processOutputChecked(['nvram', 'aapl,panic-info']);
except Exception as oXcpt:
return 'exception running nvram: %s' % (oXcpt,);
# Decode (%xx) and decompact it (7-bit -> 8-bit).
ahDigits = \
{
'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
'8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15,
};
sInfo = '';
off = len('aapl,panic-info') + 1;
iBit = 0;
bLow = 0;
while off < len(sRawInfo):
# isprint is used to determine whether to %xx or %c it, so we have to
# be a little careful before assuming % sequences are hex bytes.
if sRawInfo[off] == '%' \
and off + 3 <= len(sRawInfo) \
and sRawInfo[off + 1] in ahDigits \
and sRawInfo[off + 2] in ahDigits:
bCur = ahDigits[sRawInfo[off + 1]] * 0x10 + ahDigits[sRawInfo[off + 2]];
off += 3;
else:
bCur = ord(sRawInfo[off]);
off += 1;
sInfo += chr(((bCur & (0x7f >> iBit)) << iBit) | bLow);
bLow = bCur >> (7 - iBit);
if iBit < 6:
iBit += 1;
else:
# Final bit in sequence.
sInfo += chr(bLow);
bLow = 0;
iBit = 0;
# Expand shorthand.
sInfo = sInfo.replace('@', 'com.apple.');
sInfo = sInfo.replace('>', 'com.apple.driver.');
sInfo = sInfo.replace('|', 'com.apple.iokit.');
sInfo = sInfo.replace('$', 'com.apple.security.');
sInfo = sInfo.replace('!A', 'Apple');
sInfo = sInfo.replace('!a', 'Action');
sInfo = sInfo.replace('!B', 'Bluetooth');
sInfo = sInfo.replace('!C', 'Controller');
sInfo = sInfo.replace('!F', 'Family');
sInfo = sInfo.replace('!I', 'Intel');
sInfo = sInfo.replace('!U', 'AppleUSB');
sInfo = sInfo.replace('!P', 'Profile');
# Done.
return sInfo;
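# A minimal standalone sketch (not part of the testbox script above; the helper
# names _pack7/_unpack7 are illustrative) of the 7-bit -> 8-bit decompaction
# performed by darwinGetPanicInfo(): seven stored bytes expand back into eight
# 7-bit characters, with bLow carrying the spill-over bits between iterations.
def _pack7(sText):
    """ Packs 8 ASCII chars (<= 0x7f) into 7 bytes; the inverse of the decode loop. """
    abPacked = [];
    for i in range(7):
        bLoPart = ord(sText[i]) >> i;                                     # high bits of char i.
        bHiPart = (ord(sText[i + 1]) & ((1 << (i + 1)) - 1)) << (7 - i);  # low bits of char i+1.
        abPacked.append((bLoPart | bHiPart) & 0xff);
    return abPacked;
def _unpack7(abPacked):
    """ Same decompaction as the loop body in darwinGetPanicInfo(). """
    sText = '';
    iBit = 0;
    bLow = 0;
    for bCur in abPacked:
        sText += chr(((bCur & (0x7f >> iBit)) << iBit) | bLow);
        bLow = bCur >> (7 - iBit);
        if iBit < 6:
            iBit += 1;
        else:
            # Final bit in sequence.
            sText += chr(bLow);
            bLow = 0;
            iBit = 0;
    return sText;
assert _unpack7(_pack7('ABCDEFGH')) == 'ABCDEFGH';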
class TestBoxExecTask(TestBoxTestDriverTask):
"""
Implementation of an asynchronous EXEC task.
This uses a thread for doing the actual work, i.e. starting and monitoring
the child process, processing its output, and more.
"""
def __init__(self, oTestBoxScript, idResult, sScriptZips, sScriptCmdLine, cSecTimeout):
"""
Class instance init
"""
# Init our instance data.
self._sScriptZips = sScriptZips;
# Init super.
TestBoxTestDriverTask.__init__(self, oTestBoxScript, self._threadProc, cSecTimeout, idResult, sScriptCmdLine);
@staticmethod
def _writeStateFile(sPath, sContent):
"""
Writes a state file, raising an exception on failure.
"""
try:
oFile = open(sPath, "wb");
oFile.write(sContent.encode('utf-8'));
oFile.flush();
try: os.fsync(oFile.fileno());
except: pass;
oFile.close();
except Exception as oXcpt:
raise Exception('Failed to write "%s": %s' % (sPath, oXcpt));
return True;
def _saveState(self):
"""
Saves the task state on disk so we can launch a TestBoxCleanupTask job
if the test should cause system panic or similar.
Note! May later be extended to support tests that reboot the host.
"""
sScriptState = self._oTestBoxScript.getPathState();
try:
self._writeStateFile(os.path.join(sScriptState, 'script-cmdline.txt'), self._sScriptCmdLine);
self._writeStateFile(os.path.join(sScriptState, 'result-id.txt'), str(self._idResult));
self._writeStateFile(os.path.join(sScriptState, 'testbox-id.txt'), str(self._oTestBoxScript.getTestBoxId()));
self._writeStateFile(os.path.join(sScriptState, 'testbox-name.txt'), self._oTestBoxScript.getTestBoxName());
except Exception as oXcpt:
self._log('Failed to write state: %s' % (oXcpt,));
return False;
return True;
def _downloadAndUnpackScriptZips(self):
"""
Downloads/copies the script ZIPs into TESTBOX_SCRIPT and unzips them to
the same directory.
Raises no exceptions, returns log + success indicator instead.
"""
sPathScript = self._oTestBoxScript.getPathScripts();
asArchives = self._sScriptZips.split(',');
for sArchive in asArchives:
sArchive = sArchive.strip();
if not sArchive:
continue;
# Figure the destination name (in scripts).
sDstFile = webutils.getFilename(sArchive);
if len(sDstFile) < 1 \
or re.search('[^a-zA-Z0-9 !#$%&\'()@^_`{}~.-]', sDstFile) is not None: # FAT charset sans 128-255 + '.'.
self._log('Malformed script zip filename: %s' % (sArchive,));
return False;
sDstFile = os.path.join(sPathScript, sDstFile);
# Do the work.
if webutils.downloadFile(sArchive, sDstFile, self._oTestBoxScript.getPathBuilds(), self._log, self._log) is not True:
return False;
asFiles = utils.unpackFile(sDstFile, sPathScript, self._log, self._log);
if asFiles is None:
return False;
# Since zip files don't always include mode masks, set the X bit
# on all of them so we can execute binaries and hash-bang scripts.
for sFile in asFiles:
utils.chmodPlusX(sFile);
return True;
def _threadProc(self):
"""
Do the work of an EXEC command.
"""
sResult = constants.result.PASSED;
#
# Start by preparing the scratch directories.
#
# Note! Failures at this stage are not treated as real errors since
# they may be caused by the previous test and other circumstances
# so we don't want to fail a build because of this.
#
fRc = self._oTestBoxScript.reinitScratch(self._logInternal);
fNeedCleanUp = fRc;
if fRc is True:
fRc = self._downloadAndUnpackScriptZips();
testboxcommons.log2('_threadProc: _downloadAndUnpackScriptZips -> %s' % (fRc,));
if fRc is not True:
sResult = constants.result.BAD_TESTBOX;
#
# Make sure the script exists.
#
if fRc is True:
sScript = self._assembleArguments('none', fWithInterpreter = False)[0];
if not os.path.exists(sScript):
self._log('The test driver script "%s" cannot be found.' % (sScript,));
sDir = sScript;
while len(sDir) > 3:
sDir = os.path.dirname(sDir);
if os.path.exists(sDir):
self._log('First existing parent directory is "%s".' % (sDir,));
break;
fRc = False;
if fRc is True:
#
# Start testdriver script.
#
fRc = self._saveState();
if fRc:
(fRc, self._oChild) = self._spawnChild('all');
testboxcommons.log2('_threadProc: _spawnChild -> %s, %s' % (fRc, self._oChild));
if fRc:
(fRc, sResult) = self._monitorChild(self._cSecTimeout);
testboxcommons.log2('_threadProc: _monitorChild -> %s' % (fRc,));
# If the run failed, do explicit cleanup.
if fRc is not True:
testboxcommons.log2('_threadProc: explicit cleanups...');
self._terminateChild();
self._cleanupAfter();
fNeedCleanUp = False;
assert self._oChild is None;
#
# Clean up scratch.
#
if fNeedCleanUp:
if self._oTestBoxScript.reinitScratch(self._logInternal, cRetries = 6) is not True:
self._log('post run reinitScratch failed.');
fRc = False;
#
# Report status and everything back to the test manager.
#
if fRc is False and sResult == constants.result.PASSED:
sResult = constants.result.FAILED;
self._reportDone(sResult);
return fRc;
|
webcam_video_stream.py | import cv2
import threading
class WebcamVideoStream:
# with modifications from https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.frame_counter = 1
self.width = width
self.height = height
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
#Debug stream shape
self.real_width = int(self.stream.get(3))
self.real_height = int(self.stream.get(4))
print("> Start video stream with shape: {},{}".format(self.real_width,self.real_height))
def start(self):
# start the thread to read frames from the video stream
threading.Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
self.stream.release()
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
self.frame_counter += 1
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def isActive(self):
# check if the VideoCapture is still opened
return self.stream.isOpened()
def resize(self):
try:
self.frame = cv2.resize(self.frame, (self.width, self.height))
except:
print("> Error resizing video stream")
|
log_fatigue_plugin.py | import asyncio
import math
import threading
import time
from time import sleep
from typing import Optional
from PySide2.QtCore import QEvent, QObject
from PySide2.QtWidgets import QDialog, QLabel, QLineEdit, QPushButton, QVBoxLayout
try:
from slacrs import Slacrs
from slacrs.model import HumanFatigue
except ImportError as ex:
Slacrs = None # type: Optional[type]
HumanFatigue = None # type: Optional[type]
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
from ...config import Conf
from ..base_plugin import BasePlugin
#
# Plugin to capture the User Mouse Movements.
# User must input their name to use this plugin.
#
class LogFatiguePlugin(BasePlugin):
def __init__(self, workspace):
if not Slacrs:
raise Exception(
"Please install Slacrs to Initialize LogFatigue Plugin"
)
self._fatigue_flag = True
super().__init__(workspace)
self._fatigue = HumanFatigue()
self._strokes = []
self._main_window = workspace.view_manager.main_window
self._main_window.setMouseTracking(True)
self.EventFilterInstance = self.EventFilter(self._fatigue)
self._main_window.installEventFilter(self.EventFilterInstance)
self.modal = self.Modal(self)
self.modal.show()
self.t_log = threading.Thread(target=self._log_mouse, args=())
self.t_log.setDaemon(True)
self.t_log.start()
def _log_mouse(self):
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
session = Slacrs(database=Conf.checrs_backend_str).session()
with session.no_autoflush:
while self._fatigue_flag is True:
sleep(2)
if self._fatigue.user:
session.add(self._fatigue)
session.commit()
session.close()
class EventFilter(QObject):
def __init__(self, fatigue):
super().__init__()
self._fatigue = fatigue
self._strokes = []
def eventFilter(self, obj, event): # pylint: disable=unused-argument
if event.type() == QEvent.HoverMove:
x = event.pos().x()
old_x = event.oldPos().x()
y = event.pos().y()
old_y = event.oldPos().y()
self._fatigue.mouse_speed = int(
math.sqrt((x - old_x) ** 2 + (y - old_y) ** 2)
)
elif event.type() == QEvent.KeyPress:
timestamp = time.time()
i = 0
for i in range(len(self._strokes)):
if timestamp - self._strokes[i] <= 10:
break
self._strokes = self._strokes[i:]
self._strokes.append(timestamp)
self._fatigue.stroke = len(self._strokes)
return False
#
# Creates a modal so the user can input their name; if the modal is closed without
# submitting a name, the LogFatigue plugin will be deactivated.
#
class Modal(QDialog):
def __init__(self, outerclass):
super().__init__(outerclass._main_window)
self.label = QLabel("Enter your Name")
self.edit = QLineEdit("")
self.button = QPushButton("Submit")
self.outerclass = outerclass
layout = QVBoxLayout()
layout.addWidget(self.label)
layout.addWidget(self.edit)
layout.addWidget(self.button)
self.setLayout(layout)
self.button.clicked.connect(self.input)
def input(self):
self.outerclass._fatigue.user = self.edit.text()
self.close()
def closeEvent(self, event):
if not self.outerclass._fatigue.user:
self.outerclass._fatigue_flag = False
self.outerclass.workspace.plugins.deactivate_plugin(self.outerclass)
event.accept()
def teardown(self):
self._fatigue_flag = False
self.t_log.join()
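# A simplified, standalone sketch (update_strokes is a hypothetical helper, not
# part of the plugin) of the 10-second sliding window maintained in eventFilter()
# for the keystroke count: timestamps older than the window are dropped before
# the new keystroke is appended, and the stroke count is the window's length.
def update_strokes(strokes, now=None, window=10.0):
    now = time.time() if now is None else now   # 'time' is imported at the top of this module
    strokes = [t for t in strokes if now - t <= window]
    strokes.append(now)
    return strokes, len(strokes)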
|
machine.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 12:05:14 2015
@author: ktritz
"""
from __future__ import print_function
from builtins import str, map, range
from collections import Sized, Iterable, Container, deque
import os
from warnings import warn
import numpy as np
import MDSplus as mds
import threading
from .logbook import Logbook
from .parse import parse_top, parse_machine
from .shot import Shot
from .globals import FDP_DIR, FdpError, FdpWarning
from .datasources import canonicalMachineName, MDS_SERVERS, EVENT_SERVERS
def machineClassFactory(name=''):
"""
Class factory to implement abstract machine class for specific machine
"""
machine_name = canonicalMachineName(name)
class_name = 'Machine' + machine_name.capitalize()
MachineClass = type(class_name, (Machine,), {})
MachineClass._name = machine_name
parse_top(MachineClass)
parse_machine(MachineClass)
return MachineClass
class Machine(Sized, Iterable, Container):
"""
Abstract base class for top-level machines
"""
_connections = None
_connected = False
_thread_events = None
_logbook = None
_modules = None
_parent = None
_name = None
def __init__(self, shotlist=None, xp=None, date=None):
self._shots = {} # shot dictionary with shot number (int) keys
self._make_server_connections()
self._set_modules()
if self._logbook is None:
self._logbook = Logbook(name=self._name, root=self)
if shotlist is not None or xp is not None or date is not None:
self.addshot(shotlist=shotlist, xp=xp, date=date)
def _make_server_connections(self):
if self._connections is None:
mds_server = MDS_SERVERS[self._name]
hostname = mds_server['hostname']
port = mds_server['port']
nconnections = 2
self._connections = []
self._thread_events = []
for i in range(nconnections):
self._connections.append(None)
self._thread_events.append(threading.Event())
def connection_wrapper(i):
connection = mds.Connection('{}:{}'.format(hostname, port))
connection.tree = None
self._connections[i] = connection
ev = self._thread_events[i]
ev.set()
for i in range(nconnections):
t = threading.Thread(target=connection_wrapper, args=(i,))
t.start()
# event_server = EVENT_SERVERS[self._name]
# self._eventConnection = mds.Connection('{}:{}'.format(event_server['hostname'],
# event_server['port']))
def _set_modules(self):
if self._modules is None:
machine_diag_dir = os.path.join(FDP_DIR, 'diagnostics', self._name)
self._modules = []
for module in os.listdir(machine_diag_dir):
diag_dir = os.path.join(machine_diag_dir, module)
if os.path.isdir(diag_dir) and module[0] != '_':
self._modules.append(module)
def _validate_shot(self, shot):
if shot not in self._shots:
self._shots[shot] = Shot(shot, self)
def __getattr__(self, attr_name):
try:
shot = int(attr_name.split('s')[1])
self._validate_shot(shot)
return self._shots[shot]
except:
# import pdb
# pdb.set_trace()
raise AttributeError('bad attr: {}'.format(attr_name))
# raise
def __getitem__(self, shot):
self._validate_shot(shot)
return self._shots[shot]
def __delitem__(self, key):
del self._shots[key]
def __setitem__(self, item, value):
pass
def __dir__(self):
return ['s{}'.format(shot) for shot in self._shots.keys()]
def __contains__(self, key):
return key in self._shots
def __len__(self):
return len(self._shots)
def __iter__(self):
# iterate over Shot objects in _shots.values()
# (not over shot numbers in _shots.keys())
return iter(self._shots.values())
def __repr__(self):
return '<machine {}>'.format(self._name.upper())
def __str__(self):
return 'Machine {}'.format(self._name.upper())
def _get_logbook_credentials(self):
# override with methods/<machine>/_get_logbook_credentials.py
pass
def _get_connection(self, shot, tree):
if not self._connected:
for ev in self._thread_events:
ev.wait()
self._connected = True
for connection in self._connections:
if connection.tree == (tree, shot):
self._connections.remove(connection)
self._connections.insert(0, connection)
return connection
connection = self._connections.pop()
try:
connection.closeAllTrees()
except:
pass
try:
connection.openTree(tree, shot)
connection.tree = (tree, shot)
except:
# raise
connection.tree = None
# finally:
self._connections.insert(0, connection)
return connection
def _get_mdsshape(self, signal):
# if signal.shot is 0:
# print('No MDS data exists for model tree')
# return
connection = self._get_connection(signal.shot, signal.mdstree)
try:
usage_code = connection.get('getnci({},"USAGE")'.format(signal.mdsnode)).data()
length = connection.get('getnci({},"LENGTH")'.format(signal.mdsnode)).data()
if usage_code != 6 or length < 1:
raise ValueError
return connection.get('shape({})'.format(signal.mdsnode)).data()
except:
return
def _get_mdsdata(self, signal):
shot = signal.shot
# if shot is 0:
# print('No MDS data exists for model tree')
# return np.zeros(0)
connection = self._get_connection(shot, signal.mdstree)
if signal.mdstree.lower() == 'ptdata':
if 'Signal' in str(type(signal)):
mds_address = 'ptdata("{}", {:d})'.format(signal.mdsnode, shot)
elif 'Axis' in str(type(signal)):
mds_address = 'dim_of(ptdata("{}", {:d}))'.format(signal.mdsnode, shot)
else:
raise FdpError('bad mds data')
else:
mds_address = signal.mdsnode
try:
data = connection.get(mds_address)
except:
# raise
msg = 'MDSplus connection error for shot {}, tree {}, and node {}'.format(
signal.shot, signal.mdstree, mds_address)
raise FdpError(msg)
# warn(msg, FdpWarning)
# return np.zeros(0)
if getattr(signal, '_raw_of', None) is not None:
data = data.raw_of()
if getattr(signal, '_dim_of', None) is not None:
data = data.dim_of()
data = data.value_of().value
if signal._transpose is not None:
data = data.transpose(signal._transpose)
if hasattr(signal, '_postprocess'):
data = signal._postprocess(data)
return data
def _get_shots(self, xp=None, date=None):
shots = []
if date:
if not isinstance(date, (list, tuple)):
date = [date]
shots.extend(self._logbook.get_shotlist(date=list(date)))
if xp:
if not isinstance(xp, (list, tuple)):
xp = [xp]
shots.extend(self._logbook.get_shotlist(xp=list(xp)))
return shots
def addshot(self, shotlist=None, date=None, xp=None):
"""
Load shots
"""
shots = []
if shotlist:
if not isinstance(shotlist, (list, tuple)):
shotlist = [shotlist]
shots.extend(list(shotlist))
shots.extend(self._get_shots(xp=xp, date=date))
for shot in shots:
self._validate_shot(shot)
def shotlist(self, xp=None, date=None, quiet=False):
"""
Generate shotlist
"""
if xp or date:
shotlist = self._get_shots(xp=xp, date=date)
else:
shotlist = list(self._shots.keys())
shotlist.sort()
if not quiet:
for shotnum in shotlist:
shot = self[shotnum]
print('{} in XP {} on {}'.format(shot.shot, shot.xp, shot.date))
return shotlist
def filter(self, date=None, xp=None):
"""
Get an AbstractMachine-like object with an immutable shotlist for XP(s)
or date(s)
"""
self.addshot(xp=xp, date=date)
return ImmutableMachine(xp=xp, date=date, parent=self)
def setevent(self, event, shot_number=None, data=None):
event_data = bytearray()
if shot_number is not None:
shot_data = shot_number // 256**np.arange(4) % 256
event_data.extend(shot_data.astype(np.ubyte))
if data is not None:
event_data.extend(str(data))
mdsdata = mds.mdsdata.makeData(np.array(event_data))
event_string = 'setevent("{}", {})'.format(event, mdsdata)
status = self._eventConnection.get(event_string)
return status
def wfevent(self, event, timeout=0):
event_string = 'kind(_data=wfevent("{}",*,{})) == 0BU ? "timeout"' \
': _data'.format(event, timeout)
data = self._eventConnection.get(event_string).value
if type(data) is str:
raise FdpError('Timeout after {}s in wfevent'.format(timeout))
if not data.size:
return None
if data.size > 3:
shot_data = data[0:4]
shot_number = np.sum(shot_data * 256**np.arange(4))
data = data[4:]
return shot_number, ''.join(map(chr, data))
return data
def find(self, tag, obj=None):
root = getattr(self, '_root', self)
find_list = set([])
for module in root.s0._modules:
module_obj = getattr(root.s0, module)
container_queue = deque([module_obj])
while True:
try:
container = container_queue.popleft()
container._set_dynamic_containers()
container_queue.extend(list(container._containers.values()))
if obj is None or obj.lower() == 'signal':
for signal in list(container._signals.values()):
if signal._contains(tag):
branch_str = '.'.join([signal._get_branch(),
signal._name])
find_list.add(branch_str)
if obj is None or obj.lower() == 'axis':
for signal in list(container._signals.values()):
for axis_str in signal.axes:
axis = getattr(signal, axis_str)
if axis._contains(tag):
branch_str = '.'.join([signal._get_branch(),
signal._name, axis._name])
find_list.add(branch_str)
if obj is None or obj.lower() == 'container':
if container._contains(tag):
find_list.add(container._get_branch())
except IndexError:
break
find_list = list(find_list)
find_list.sort()
return find_list
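# A small, hedged worked example (values are illustrative; np is the numpy import
# at the top of this module) of the little-endian byte packing used by setevent()
# and wfevent() above: the shot number is split into four bytes before sending and
# reassembled from the first four event bytes on receipt.
#
#   shot_number = 204620
#   shot_number // 256 ** np.arange(4) % 256       # -> array([76, 31, 3, 0])
#   76 + 31 * 256 + 3 * 256 ** 2 + 0 * 256 ** 3    # -> 204620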
# machine classes
Nstxu = machineClassFactory('nstxu')
D3D = machineClassFactory('d3d')
class ImmutableMachine(Sized, Iterable, Container):
"""
An immutable AbstractMachine-like class for dates and XPs.
The shotlist is auto-loaded based on date or XP, and the shotlist
can not be modified.
AbstractMachine.filter_shots() returns an ImmutableMachine object.
**Usage**::
>>> xp1013 = fdp.nstxu.filter_shots(xp=1013)
>>> for shot in xp1013:
... shot.mpts.te.plot()
...
"""
def __init__(self, xp=None, date=None, parent=None):
self._shots = {}
self._parent = parent
self._name = self._parent._name
shotlist = self._parent.shotlist(xp=xp, date=date, quiet=True)
for shot in shotlist:
self._shots[shot] = self._parent[shot]
def __getattr__(self, name):
try:
shot = int(name.split('s')[1])
return self[shot]
except:
raise AttributeError('bad attr: {}'.format(name))
def __repr__(self):
return '<immutable machine {}>'.format(self._name.upper())
def __iter__(self):
return iter(self._shots.values())
def __contains__(self, key):
return key in self._shots
def __len__(self):
return len(self._shots)
def __delitem__(self, item):
pass
def __getitem__(self, item):
return self._shots[item]
def __dir__(self):
return ['s{}'.format(shot) for shot in self._shots]
def shotlist(self, quiet=False):
shotlist = list(self._shots.keys())
shotlist.sort()
if not quiet:
for shotnum in shotlist:
shot = self[shotnum]
print('{} in XP {} on {}'.format(shot.shot, shot.xp, shot.date))
return shotlist
|
timer.py | """Timer"""
import time
import functools
from threading import Thread
class Chronometer:
"""Simple chronometer with context manager support
# Properties
partial: float, current count in seconds
running: boolean, chronometer current state
# Example
```python
import time
chr = Chronometer()
chr.start()
time.sleep(5)
print('Partial:', chr.partial)
chr.stop()
assert not chr.running
chr.reset()
# or use with the context manager
with Chronometer() as chr:
assert chr.running
time.sleep(5)
print('Partial:', chr.partial)
```
"""
def __init__(self):
self.__running = False
self.__start = None
self.__end = None
self.__stop_partial = 0
def __enter__(self):
self.start()
return self
def __exit__(self, *i):
self.stop()
def start(self):
"""Starts the chronometer or resume the latest stop"""
if self.__running == True:
return
self.__running = True
self.__start = time.clock()
def reset(self):
"""Call Chronometer.stop(reset=True)"""
self.stop(reset=True)
def stop(self, reset=False):
"""Stops the chronometer
# Arguments
reset: optional, boolean, default `False`
- `True` - Reset the chronometer to zero
- `False` - Maintain the current count
"""
if reset:
self.__stop_partial = 0
else:
self.__stop_partial = self.partial
self.__running = False
@property
def partial(self):
if self.__running:
return time.clock() - self.__start + self.__stop_partial
else:
return self.__stop_partial
@property
def running(self):
return self.__running
class ChronometerDecorator(Chronometer):
"""Chronometer Decorator
Use this decorator to measure time when executing functions
or methods.
# Example
```
import time
@ChronometerDecorator()
def foo(msg):
print(msg)
time.sleep(3)
@ChronometerDecorator(print_sum=True)
def bar():
time.sleep(1)
class baz:
def __init__(self):
self.message = 'Works on methods as well!'
@ChronometerDecorator(method=True)
def qux(self):
time.sleep(3)
print(self.message)
foo('Starting ChronometerDecorator test!')
for _ in range(3): bar()
baz().qux()
''' Result example:
Starting ChronometerDecorator test!
[ChronometerDecorator]: Function foo > Time: 2.9994740292006554
[ChronometerDecorator]: Function bar > Time: 1.000767622853573 - Total: 1.000767622853573
[ChronometerDecorator]: Function bar > Time: 0.9999331681037802 - Total: 2.000700790957353
[ChronometerDecorator]: Function bar > Time: 0.9999622418665695 - Total: 3.0006630328239225
Works on methods as well!
[ChronometerDecorator]: Method qux > Time: 3.0000543717121673
'''
```
"""
def __init__(self, method=False, print_sum=False):
self.time_ls = []
self.method = method
self.print_sum = print_sum
super().__init__()
def __call__(self, function=None):
self._function = function
self.func_name = function.__name__
if self.method:
return functools.partialmethod(ChronometerDecorator.method_call, self)
else:
return self.function_call
def function_call(self, *a, **kw):
self.start()
self._function(*a, **kw)
self.stop()
self.register()
@staticmethod
def method_call(func_self, self, *a, **kw):
self.start()
self._function(func_self, *a, **kw)
self.stop()
self.register()
def register(self):
self.time_ls.append(self.partial)
msg_sum = ''
if self.print_sum:
msg_sum = ' - Total: {}'.format(sum(self.time_ls))
msg_func_name = 'Method' if self.method else 'Function'
msg_func_name += ' {} >'.format(self.func_name)
msg = '[ChronometerDecorator]: {} Time: {}{}'\
.format(msg_func_name, self.partial, msg_sum)
print(msg)
self.reset()
class Timer(Chronometer):
"""Countdown Timer with callback
Timer inherits from [Chronometer](#chronometer-class)
# Arguments
time: int, required
- Start of the countdown in seconds
callback: function, optional, default `None`
- Function to be triggered when the countdown hits zero
# Properties
time_left: float, time left in seconds
# Example
```python
import time
def time_out():
print('Time out!')
timer = Timer(30, callback=time_out)
with timer:
assert timer.running
print('Burning 10 seconds...')
time.sleep(10)
assert not timer.running
print('Time left:', timer.time_left)
print('Waiting without running the clock...')
time.sleep(10)
print('Time left:', timer.time_left)
with timer:
print('Running the clock till timeout')
while timer.running:
print('> + 15 seconds...')
time.sleep(15)
print('Finished!')
```
"""
def __init__(self, time, callback=None):
super().__init__()
self.time_limit = time
self.callback = callback
self.time_out = False
def __copy__(self):
"""Custom copy to clean the `callback` function"""
new = type(self)(None, None)
new.__dict__.update(self.__dict__)
new.callback = None
return new
def __deepcopy__(self, memo):
return self.__copy__()
def start(self):
"""Starts the countdown and starts the thread (`_time_out` method)
which will trigger the `callback`
"""
if self.time_out:
self.reset()
super().start()
Thread(target=self._time_out).start()
def stop(self, **i):
"""Stops the countdown and stops the running thread"""
super().stop(**i)
def reset(self):
super().reset()
self.time_out = False
@property
def time_left(self):
time_left = self.time_limit - self.partial
time_left = time_left if time_left > 0 else 0
return time_left
def _time_out(self):
"""Asynchronous method that trigger the `callback` function
when the `time_left` is zero
"""
while True:
if self.running:
if self.time_left > 0:
time.sleep(.3)
else:
self.stop()
self.time_out = True
if self.callback:
self.callback()
break
else:
break
|
dmlc_mpi.py | #!/usr/bin/env python
"""
DMLC submission script, MPI version
"""
import argparse
import sys
import os
import subprocess
import tracker
from threading import Thread
parser = argparse.ArgumentParser(description='DMLC script to submit dmlc job using MPI')
parser.add_argument('-n', '--nworker', required=True, type=int,
help = 'number of worker processes to be launched')
parser.add_argument('-s', '--server-nodes', default = 0, type=int,
help = 'number of server nodes to be launched')
parser.add_argument('--log-level', default='INFO', type=str,
choices=['INFO', 'DEBUG'],
help = 'logging level')
parser.add_argument('--log-file', type=str,
help = 'output log to the specific log file')
parser.add_argument('-H', '--hostfile', type=str,
help = 'the hostfile of mpi server')
parser.add_argument('command', nargs='+',
help = 'command for dmlc program')
parser.add_argument('--host-ip', type=str,
help = 'the scheduler ip', default='ip')
args, unknown = parser.parse_known_args()
#
# submission script using MPI
#
def get_mpi_env(envs):
"""get the mpirun command for setting the envornment
support both openmpi and mpich2
"""
outfile="/tmp/mpiver"
os.system("mpirun -version 1>/tmp/mpiver 2>/tmp/mpiver")
with open (outfile, "r") as infile:
mpi_ver = infile.read()
cmd = ''
if 'Open MPI' in mpi_ver:
for k, v in envs.items():
cmd += ' -x %s=%s' % (k, str(v))
elif 'mpich' in mpi_ver:
for k, v in envs.items():
cmd += ' -env %s %s' % (k, str(v))
else:
raise Exception('unknown mpi version %s' % (mpi_ver))
return cmd
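# Hedged usage note (the dict below is illustrative): get_mpi_env() only builds
# the per-launcher option string, e.g.
#   get_mpi_env({'DMLC_ROLE': 'worker', 'DMLC_NUM_WORKER': 4})
# yields " -x DMLC_ROLE=worker -x DMLC_NUM_WORKER=4" under Open MPI and
# " -env DMLC_ROLE worker -env DMLC_NUM_WORKER 4" under MPICH, assuming the
# dict's insertion order is preserved (Python 3.7+).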
def mpi_submit(nworker, nserver, pass_envs):
"""
customized submit script that submits the worker jobs; each must contain args as a parameter
note this can be a lambda function containing additional input parameters
Parameters
nworker number of worker processes to start up
nserver number of server nodes to start up
pass_envs environment variables to be added to the starting programs
"""
def run(prog):
""""""
subprocess.check_call(prog, shell = True)
cmd = ''
if args.hostfile is not None:
cmd = '--hostfile %s' % (args.hostfile)
cmd += ' ' + ' '.join(args.command) + ' ' + ' '.join(unknown)
# start servers
if nserver > 0:
pass_envs['DMLC_ROLE'] = 'server'
prog = 'mpirun -n %d %s %s' % (nserver, get_mpi_env(pass_envs), cmd)
thread = Thread(target = run, args=(prog,))
thread.setDaemon(True)
thread.start()
if nworker > 0:
pass_envs['DMLC_ROLE'] = 'worker'
prog = 'mpirun -n %d %s %s' % (nworker, get_mpi_env(pass_envs), cmd)
thread = Thread(target = run, args=(prog,))
thread.setDaemon(True)
thread.start()
tracker.config_logger(args)
tracker.submit(args.nworker, args.server_nodes, fun_submit = mpi_submit,
hostIP=args.host_ip,
pscmd=(' '.join(args.command) + ' ' + ' '.join(unknown)))
|
bridge.py |
import threading as mt
from ..logger import Logger
from ..profile import Profiler
from ..config import Config
from ..json_io import read_json, write_json
# ------------------------------------------------------------------------------
#
class Bridge(object):
'''
A bridge can be configured to have a finite lifetime: when no messages are
received in `timeout` seconds, the bridge process will terminate.
'''
# --------------------------------------------------------------------------
#
@staticmethod
def get_config(name, pwd=None):
if not pwd:
pwd = '.'
cfg = Config(cfg=read_json('%s/%s.cfg' % (pwd, name)))
return cfg
# --------------------------------------------------------------------------
#
def __init__(self, cfg):
self._cfg = cfg
self._channel = self._cfg.channel
self._uid = self._cfg.uid
self._pwd = self._cfg.path
self._log = Logger(name=self._uid, ns='radical.utils',
level=self._cfg.log_lvl, path=self._pwd)
self._prof = Profiler(name=self._uid, path=self._pwd)
if 'hb' in self._uid or 'heartbeat' in self._uid:
self._prof.disable()
else:
self._prof.disable()
self._prof.prof('init', uid=self._uid, msg=self._pwd)
self._log.debug('bridge %s init', self._uid)
self._bridge_initialize()
write_json('%s/%s.cfg' % (self._pwd, self._cfg.uid),
{'uid' : self._cfg.uid,
self.type_in : str(self.addr_in),
self.type_out: str(self.addr_out)})
# --------------------------------------------------------------------------
#
@property
def name(self):
return self._uid
@property
def uid(self):
return self._uid
@property
def channel(self):
return self._channel
# protocol independent addr query
@property
def type_in(self):
raise NotImplementedError()
@property
def type_out(self):
raise NotImplementedError()
@property
def addr_in(self):
raise NotImplementedError()
@property
def addr_out(self):
raise NotImplementedError()
def _bridge_initialize(self):
raise NotImplementedError()
def _bridge_work(self):
raise NotImplementedError()
# --------------------------------------------------------------------------
#
def start(self):
# the bridge runs in a thread. It is the bridge's owner process'
# responsibility to ensure the thread is seeing sufficient time to perform
# as needed. Given Python's thread performance (or lack thereof), this
# basically means that the user of this class should create a separate
# process instance to host the bridge thread.
self._term = mt.Event()
self._bridge_thread = mt.Thread(target=self._bridge_work)
self._bridge_thread.daemon = True
self._bridge_thread.start()
self._log.info('started bridge %s', self._uid)
# --------------------------------------------------------------------------
#
@staticmethod
def create(cfg):
# NOTE: I'd rather have this as class data than as stack data, but
# python stumbles over circular imports at that point :/
# Another option though is to discover and dynamically load
# components.
from .pubsub import PubSub
from .queue import Queue
_btypemap = {'pubsub' : PubSub,
'queue' : Queue}
kind = cfg['kind']
if kind not in _btypemap:
raise ValueError('unknown bridge type (%s)' % kind)
btype = _btypemap[kind]
bridge = btype(cfg)
return bridge
# --------------------------------------------------------------------------
#
def stop(self):
self._term.set()
# self._bridge_thread.join(timeout=timeout)
self._prof.prof('term', uid=self._uid)
# if timeout is not None:
# return not self._bridge_thread.is_alive()
# --------------------------------------------------------------------------
#
@property
def alive(self):
return self._bridge_thread.is_alive()
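# --------------------------------------------------------------------------
#
# Hedged usage sketch (the 'control_pubsub' channel name and the session path
# are illustrative; a concrete PubSub or Queue subclass is what actually runs):
#
#   cfg    = Bridge.get_config('control_pubsub', pwd='/tmp/session')
#   bridge = Bridge.create(cfg)      # picks PubSub or Queue from cfg['kind']
#   bridge.start()                   # runs _bridge_work() in a daemon thread
#   ...
#   bridge.stop()                    # sets the term event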
# ------------------------------------------------------------------------------
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import colorama
import base64
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
import zipfile
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (ResourceNotFoundError,
ClientRequestError,
ArgumentUsageError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
ValidationError,
UnauthorizedError)
from azure.cli.core._profile import Profile
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_agent_pools
from ._client_factory import get_msi_client
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import ADDONS
from ._consts import CONST_CANIPULL_IMAGE
from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM
from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit, then opens a URL. Useful for waiting for a proxy to come up and then opening the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(
cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError(
'Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(
name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError(
'Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(
_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError(
'Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError(
'Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def _unzip(src, dest):
logger.debug('Extracting %s to %s.', src, dest)
system = platform.system()
if system in ('Linux', 'Darwin', 'Windows'):
with zipfile.ZipFile(src, 'r') as zipObj:
zipObj.extractall(dest)
else:
raise CLIError('The current system is not supported.')
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
raise CLIError(
'Proxy server ({}) does not exist on the cluster.'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError(
'Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
kubelogin_version='latest', kubelogin_install_location=None,
kubelogin_base_src_url=None):
k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
k8s_install_kubelogin(cmd, kubelogin_version,
kubelogin_install_location, kubelogin_base_src_url)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubectl, a command-line interface for Kubernetes clusters.
"""
if not source_url:
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(
install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
raise CLIError(
'Proxy server ({}) does not exist on the cluster.'.format(system))
logger.warning('Downloading client to "%s" from "%s"',
install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError(
'Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip(
'\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
"""
cloud_name = cmd.cli_ctx.cloud.name
if not source_url:
source_url = 'https://github.com/Azure/kubelogin/releases/download'
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
if client_version == 'latest':
context = _ssl_context()
latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
if cloud_name.lower() == 'azurechinacloud':
latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
latest_release = urlopen(latest_release_url, context=context).read()
client_version = json.loads(latest_release)['tag_name'].strip()
else:
client_version = "v%s" % client_version
base_url = source_url + '/{}/kubelogin.zip'
file_url = base_url.format(client_version)
# ensure installation directory exists
install_dir, cli = os.path.dirname(
install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
system = platform.system()
if system == 'Windows':
sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
elif system == 'Linux':
# TODO: Support ARM CPU here
sub_dir, binary_name = 'linux_amd64', 'kubelogin'
elif system == 'Darwin':
sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
else:
raise CLIError(
'Proxy server ({}) does not exist on the cluster.'.format(system))
with tempfile.TemporaryDirectory() as tmp_dir:
try:
download_path = os.path.join(tmp_dir, 'kubelogin.zip')
logger.warning('Downloading client to "%s" from "%s"',
download_path, file_url)
_urlretrieve(file_url, download_path)
except IOError as ex:
raise CLIError(
'Connection error while attempting to download client ({})'.format(ex))
_unzip(download_path, tmp_dir)
download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
shutil.move(download_path, install_location)
os.chmod(install_location, os.stat(install_location).st_mode |
stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip(
'\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False, aad_session_key
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate',
value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate',
value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(
cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
if not any([ids, assignee, role, resource_group_name, scope, assignee, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
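# Illustrative example (hypothetical inputs): the helper above strips characters
# outside [A-Za-z0-9-], truncates each part, and prepends 'a' if the name would
# otherwise start with a non-letter, e.g.
#   _get_default_dns_prefix('myCluster', 'my_rg!', '72f988bf-0000-0000-0000-000000000000')
#   -> 'myCluster-myrg-72f988'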
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
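# Illustrative result (hypothetical credentials): when windows=True the helper
# returns a dict matching the ARM template schema, otherwise None.
#   _generate_windows_profile(True, 'azureuser', 'Passw0rd!')
#   -> {"adminUsername": "azureuser", "adminPassword": "Passw0rd!"}
# A missing or <6 character password raises CLIError before any profile is built.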
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(
default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(
default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict(
{"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(
_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
def _get_user_assigned_identity_resource_id_regular_expression():
return re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
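# Illustrative match (hypothetical resource id): the pattern above captures the
# subscription id, resource group and identity name, e.g. for
#   /subscriptions/<sub-id>/resourcegroups/my-rg/providers/microsoft.managedidentity/userassignedidentities/my-identity
# group(1) == '<sub-id>', group(2) == 'my-rg', group(3) == 'my-identity'.
# The caller lowercases the id first, and the flag keeps matching case-insensitive.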
def _get_user_assigned_identity(cli_ctx, resource_id):
resource_id = resource_id.lower()
_re_user_assigned_identity_resource_id = _get_user_assigned_identity_resource_id_regular_expression()
match = _re_user_assigned_identity_resource_id.search(resource_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
identity_name = match.group(3)
msi_client = get_msi_client(cli_ctx, subscription_id)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise ResourceNotFoundError("Identity {} not found.".format(resource_id))
raise ClientRequestError(ex.message)
return identity
raise InvalidArgumentValueError(
"Cannot parse identity name from provided resource id {}.".format(resource_id))
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
return _get_user_assigned_identity(cli_ctx, resource_id).client_id
def _get_user_assigned_identity_object_id(cli_ctx, resource_id):
return _get_user_assigned_identity(cli_ctx, resource_id).principal_id
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all Linux machines with the SSH RSA
public key string. Your key should include three parts, for example
'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for agent pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows containers.
:type windows: bool
:param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(
name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(
windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
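# Illustrative on-disk layout (sketch, values hypothetical): store_acs_service_principal
# above keys the file by subscription id, so acsServicePrincipal.json looks like
#   {
#     "<subscription-id>": {
#       "client_secret": "<secret>",
#       "service_principal": "<app-id>"
#     }
#   }
# and load_acs_service_principal simply returns the entry for the given subscription.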
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
DeploymentProperties = cmd.get_models(
'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(
template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
Deployment = cmd.get_models(
'Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
validation_poller = smc.begin_validate(
resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return smc.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(
name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError(
'Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(
path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning(
'Failed to merge credentials to kube config file: %s', exc)
logger.warning(
'The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if addition is None:
raise CLIError(
'failed to load additional configuration from {}'.format(addition_file))
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(
stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
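# Note: on Python 3 the same idempotent behaviour can be written as
# os.makedirs(path, exist_ok=True); the explicit EEXIST check above follows the
# Stack Overflow idiom referenced in the comment and also works on older interpreters.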
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.begin_create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError(
"service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
result = client.create(app_create_param, raw=True)
return result.output, result.response.headers["ocp-aad-session-key"]
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(
reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
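# Illustrative usage (sketch, hypothetical values): exactly one of the two lists is
# populated, the other stays None; dates default to now / now + 1 year.
#   _build_application_creds(password='s3cret')        -> ([PasswordCredential(...)], None)
#   _build_application_creds(key_value='<base64 cert>') -> (None, [KeyCredential(...)])
#                                                           # usage='Verify', type='AsymmetricX509Cert'
# Passing both password and key_value raises CLIError.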
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(
resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(
cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(
role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
# TODO: track2/remove custom headers, depends on 'azure.mgmt.authorization'
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
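# Illustrative usage (hypothetical ids):
#   _build_role_scope(None, None, '0000-sub')    -> '/subscriptions/0000-sub'
#   _build_role_scope('my-rg', None, '0000-sub') -> '/subscriptions/0000-sub/resourceGroups/my-rg'
#   _build_role_scope('my-rg', '/subscriptions/0000-sub', '0000-sub') raises CLIError,
#   because an explicit scope makes the resource group argument redundant.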
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(
filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError(
"No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
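# Illustrative usage: values in dict2 win on key collisions and dict1 is not mutated.
#   _update_dict({'count': 1, 'vmSize': 'A'}, {'count': 3}) -> {'count': 3, 'vmSize': 'A'}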
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
def aks_check_acr(cmd, client, resource_group_name, name, acr):
if not which("kubectl"):
raise ValidationError("Can not find kubectl executable in PATH")
_, browse_path = tempfile.mkstemp()
aks_get_credentials(
cmd, client, resource_group_name, name, admin=False, path=browse_path
)
# Get kubectl minor version
kubectl_minor_version = -1
try:
cmd = f"kubectl version -o json --kubeconfig {browse_path}"
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
jsonS, _ = output.communicate()
kubectl_version = json.loads(jsonS)
kubectl_minor_version = int(kubectl_version["clientVersion"]["minor"])
kubectl_server_minor_version = int(
kubectl_version["serverVersion"]["minor"])
kubectl_server_patch = int(
kubectl_version["serverVersion"]["gitVersion"].split(".")[-1])
if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14):
logger.warning('There is a known issue for Kubernetes versions < 1.17.14 when connecting to '
'ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for '
'more information.')
except subprocess.CalledProcessError as err:
raise ValidationError(
"Could not find kubectl minor version: {}".format(err))
if kubectl_minor_version == -1:
raise ValidationError("Failed to get kubectl version")
podName = "canipull-" + str(uuid.uuid4())
overrides = {
"spec": {
"restartPolicy": "Never",
"hostNetwork": True,
"containers": [
{
"securityContext": {"runAsUser": 0},
"name": podName,
"image": CONST_CANIPULL_IMAGE,
"args": ["-v6", acr],
"stdin": True,
"stdinOnce": True,
"tty": True,
"volumeMounts": [
{"name": "azurejson", "mountPath": "/etc/kubernetes"},
{"name": "sslcerts", "mountPath": "/etc/ssl/certs"},
],
}
],
"tolerations": [
{"key": "CriticalAddonsOnly", "operator": "Exists"},
{"effect": "NoExecute", "operator": "Exists"},
],
"volumes": [
{"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}},
{"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}},
],
"nodeSelector": {"kubernetes.io/os": "linux"},
}
}
try:
cmd = [
"kubectl",
"run",
"--kubeconfig",
browse_path,
"--rm",
"--quiet",
"--image",
CONST_CANIPULL_IMAGE,
"--overrides",
json.dumps(overrides),
"-it",
podName,
]
# Support kubectl versions < 1.18
if kubectl_minor_version < 18:
cmd += ["--generator=run-pod/v1"]
output = subprocess.check_output(
cmd,
universal_newlines=True,
)
except subprocess.CalledProcessError as err:
raise CLIError("Failed to check the ACR: {}".format(err))
if output:
print(output)
else:
raise CLIError("Failed to check the ACR.")
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=False))
# open portal view if addon is not enabled or k8s version >= 1.19.0
if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
subscription_id = get_subscription_id(cmd.cli_ctx)
dashboardURL = (
# Azure Portal URL (https://portal.azure.com for public cloud)
cmd.cli_ctx.cloud.endpoints.portal +
('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
'/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
)
if in_cloud_console():
logger.warning(
'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
else:
logger.warning('Kubernetes resources view on %s', dashboardURL)
if not disable_browser:
webbrowser.open_new_tab(dashboardURL)
return
# otherwise open the kube-dashboard addon
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post(
'http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning(
'To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
# bytes.find() returns -1 when absent, so test membership instead to detect the unsupported flag
if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning(
'"--address" is only supported in kubectl v1.13 and later.')
logger.warning(
'The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig",
browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
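# Illustrative usage: an empty name falls back to the default pool name and long
# names are truncated to 12 characters (the ACS RP appends its own suffix).
#   _trim_nodepoolname(None)                   -> 'nodepool1'
#   _trim_nodepoolname('verylongnodepoolname') -> 'verylongnode'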
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
(hasattr(
result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
logger.warning('Could not find service principal or user assigned MSI for role '
'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
(hasattr(
result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
):
service_principal_msi_id = result.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
from msrestazure.tools import parse_resource_id, resource_id
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
parsed_appgw_id = parse_resource_id(appgw_id)
appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
resource_group=parsed_appgw_id["resource_group"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=appgw_group_id):
logger.warning('Could not create a role assignment for application gateway: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in config:
subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_msi_id, is_service_principal, scope=subnet_id):
logger.warning('Could not create a role assignment for subnet: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
if result.agent_pool_profiles[0].vnet_subnet_id is not None:
parsed_subnet_vnet_id = parse_resource_id(
result.agent_pool_profiles[0].vnet_subnet_id)
vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
resource_group=parsed_subnet_vnet_id["resource_group"],
namespace="Microsoft.Network",
type="virtualNetworks",
name=parsed_subnet_vnet_id["name"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual network: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
# Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
vnet_id = vnet_subnet_id.rpartition('/')[0]
vnet_id = vnet_id.rpartition('/')[0]
service_principal_msi_id = None
is_service_principal = False
os_type = 'Linux'
addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(addon_name in result.addon_profiles) and
(hasattr(result.addon_profiles[addon_name], 'identity')) and
(hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
):
logger.info('virtual node MSI exists, using it')
service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual node addon. '
'Are you an Owner on this subscription?')
else:
logger.warning('Could not find service principal or user assigned MSI for role '
'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
private_dns_zone=None,
fqdn_subdomain=None,
enable_managed_identity=True,
assign_identity=None,
attach_acr=None,
enable_aad=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_encryption_at_host=False,
assign_kubelet_identity=None,
enable_ultra_ssd=False,
no_wait=False,
yes=False,
enable_azure_rbac=False):
ManagedClusterWindowsProfile = cmd.get_models('ManagedClusterWindowsProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterSKU = cmd.get_models('ManagedClusterSKU',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceNetworkProfile = cmd.get_models('ContainerServiceNetworkProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceLinuxProfile = cmd.get_models('ContainerServiceLinuxProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceSshConfiguration = cmd.get_models('ContainerServiceSshConfiguration',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceSshPublicKey = cmd.get_models('ContainerServiceSshPublicKey',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterAADProfile = cmd.get_models('ManagedClusterAADProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterAgentPoolProfile = cmd.get_models('ManagedClusterAgentPoolProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterIdentity = cmd.get_models('ManagedClusterIdentity',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterPropertiesIdentityProfileValue = cmd.get_models('ManagedClusterPropertiesIdentityProfileValue',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedCluster = cmd.get_models('ManagedCluster',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties = cmd.get_models(
'Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if dns_name_prefix and fqdn_subdomain:
raise MutuallyExclusiveArgumentError(
'--dns-name-prefix and --fqdn-subdomain cannot be used at same time')
if not dns_name_prefix and not fqdn_subdomain:
dns_name_prefix = _get_default_dns_prefix(
name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(
load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError(
'--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
# Must be 12 chars or less before ACS RP adds to it
name=_trim_nodepoolname(nodepool_name),
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type,
mode="System"
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool_profile.os_disk_type = node_osdisk_type
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(
admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username or windows_admin_password:
# To avoid that windows_admin_password is set but windows_admin_username is not
if windows_admin_username is None:
try:
from knack.prompting import prompt
windows_admin_username = prompt('windows_admin_username: ')
# The validation for admin_username in ManagedClusterWindowsProfile will fail even if
# users still set windows_admin_username to empty here
except NoTTYException:
raise CLIError(
'Please specify username for Windows in non-interactive mode.')
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_license_type = None
if enable_ahub:
windows_license_type = 'Windows_Server'
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type)
# If customer explicitly provide a service principal, disable managed identity.
if service_principal and client_secret:
enable_managed_identity = False
# Skip create service principal profile for the cluster if the cluster
# enables managed identity and customer doesn't explicitly provide a service principal.
service_principal_profile = None
principal_obj = None
if not (enable_managed_identity and not service_principal and not client_secret):
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
fqdn_subdomain=fqdn_subdomain, location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
need_post_creation_vnet_permission_granting = False
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
# if service_principal_profile is None, then this cluster is an MSI cluster,
# and the service principal does not exist. Two cases:
# 1. For system assigned identity, we just tell user to grant the
# permission after the cluster is created to keep consistent with portal experience.
# 2. For user assigned identity, we can grant needed permission to
# user provided user assigned identity before creating managed cluster.
if service_principal_profile is None and not assign_identity:
msg = ('It is highly recommended to use USER assigned identity '
'(option --assign-identity) when you want to bring your own '
'subnet, which will have no latency for the role assignment to '
'take effect. When using SYSTEM assigned identity, '
'azure-cli will grant Network Contributor role to the '
'system assigned identity after the cluster is created, and '
'the role assignment will take some time to take effect, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
'proceed to create cluster with system assigned identity?')
if not yes and not prompt_y_n(msg, default="n"):
return None
need_post_creation_vnet_permission_granting = True
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(
cmd.cli_ctx, assign_identity)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
identity_client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
cmd,
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
# The attach-acr operation will be handled after the cluster is created.
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
outbound_type = _set_outbound_type(
outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError(
'Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
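# For illustration, a default "kubenet" profile with a standard load balancer
# (values hypothetical) would come out of the branch above roughly as:
#
#     network_profile = ContainerServiceNetworkProfile(
#         network_plugin="kubenet",
#         load_balancer_sku="standard",
#         load_balancer_profile=load_balancer_profile,
#         outbound_type="loadBalancer",
#     )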
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
aci_subnet_name,
vnet_subnet_id,
appgw_name,
appgw_subnet_cidr,
appgw_id,
appgw_subnet_id,
appgw_watch_namespace,
enable_sgxquotehelper
)
monitoring = False
if CONST_MONITORING_ADDON_NAME in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(
cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
# True when the ingress-appgw addon is present in the profile map and enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
enable_virtual_node = True
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
if disable_rbac and enable_azure_rbac:
raise ArgumentUsageError(
'"--enable-azure-rbac" can not be used together with "--disable-rbac"')
aad_profile = ManagedClusterAADProfile(
managed=True,
enable_azure_rbac=enable_azure_rbac,
# ids -> i_ds due to track 2 naming issue
admin_group_object_i_ds=_parse_comma_separated_list(
aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if enable_azure_rbac is True:
raise ArgumentUsageError(
'"--enable-azure-rbac" can only be used together with "--enable-aad"')
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
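# Sketch of the managed-AAD path (GUIDs hypothetical): the flags
#
#     az aks create ... --enable-aad --enable-azure-rbac \
#         --aad-admin-group-object-ids 00000000-0000-0000-0000-000000000000 \
#         --aad-tenant-id 11111111-1111-1111-1111-111111111111
#
# produce a ManagedClusterAADProfile with managed=True, as built above.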
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
cmd,
api_server_authorized_ip_ranges,
enable_private_cluster=enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError(
'specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if not enable_managed_identity and assign_identity:
raise ArgumentUsageError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
# pylint: disable=line-too-long
assign_identity: Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
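# For a user-assigned identity, the dict above is keyed by the identity's
# resource ID; a hypothetical example:
#
#     user_assigned_identity = {
#         '/subscriptions/<sub>/resourcegroups/<rg>/providers/'
#         'Microsoft.ManagedIdentity/userAssignedIdentities/<name>':
#             Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties(),
#     }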
identity_profile = None
if assign_kubelet_identity:
if not assign_identity:
# pylint: disable=line-too-long
raise ArgumentUsageError('--assign-kubelet-identity can only be specified when --assign-identity is specified')
kubelet_identity = _get_user_assigned_identity(cmd.cli_ctx, assign_kubelet_identity)
identity_profile = {
'kubeletidentity': ManagedClusterPropertiesIdentityProfileValue(
resource_id=assign_kubelet_identity,
client_id=kubelet_identity.client_id,
object_id=kubelet_identity.principal_id
)
}
cluster_identity_object_id = _get_user_assigned_identity_object_id(cmd.cli_ctx, assign_identity)
# ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity
_ensure_cluster_identity_permission_on_kubelet_identity(
cmd.cli_ctx,
cluster_identity_object_id)
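# A hedged example of bringing your own kubelet identity (IDs hypothetical):
#
#     az aks create ... --enable-managed-identity \
#         --assign-identity <control-plane-identity-resource-id> \
#         --assign-kubelet-identity <kubelet-identity-resource-id>
#
# The cluster identity then needs the "Managed Identity Operator" role on the
# kubelet identity, which the call above ensures.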
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
api_server_access_profile=api_server_access_profile,
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id,
identity_profile=identity_profile
)
use_custom_private_dns_zone = False
if private_dns_zone:
if not enable_private_cluster:
raise InvalidArgumentValueError("Invalid private dns zone for public cluster. "
"It should always be empty for a public cluster.")
mc.api_server_access_profile.private_dns_zone = private_dns_zone
from msrestazure.tools import is_valid_resource_id
if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:
if is_valid_resource_id(private_dns_zone):
use_custom_private_dns_zone = True
else:
raise InvalidArgumentValueError(
private_dns_zone + " is not a valid Azure resource ID.")
if fqdn_subdomain:
if not use_custom_private_dns_zone:
raise ArgumentUsageError("--fqdn-subdomain should only be used for "
"a private cluster with a custom private dns zone")
mc.fqdn_subdomain = fqdn_subdomain
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
# Add the AAD session key to the request headers.
# If principal_obj is None, we will not add this header; this can happen
# when the cluster enables managed identity. In that case the header is
# useless, so it is fine to omit it.
custom_headers = None
if principal_obj:
custom_headers = {
'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
created_cluster = _put_managed_cluster_ensuring_permission(
cmd,
client,
subscription_id,
resource_group_name,
name,
mc,
monitoring,
ingress_appgw_addon_enabled,
enable_virtual_node,
need_post_creation_vnet_permission_granting,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
custom_headers,
no_wait)
return created_cluster
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
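# The loop above retries cluster creation while the service principal
# propagates through AAD. A minimal standalone sketch of the same pattern
# (hypothetical helper, not part of this module):
#
#     def _retry_on_aad_propagation(action, max_retry=30, delay=3):
#         last_exc = Exception(None)
#         for _ in range(max_retry):
#             try:
#                 return action()
#             except CloudError as ex:
#                 last_exc = ex
#                 if 'not found in Active Directory tenant' in ex.message:
#                     time.sleep(delay)
#                 else:
#                     raise
#         raise last_exc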
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
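# Example invocation (names hypothetical):
#
#     az aks disable-addons --addons monitoring -n MyManagedCluster -g MyResourceGroup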
def aks_enable_addons(cmd, client, resource_group_name, name, addons,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name,
appgw_name=appgw_name,
appgw_subnet_cidr=appgw_subnet_cidr,
appgw_id=appgw_id,
appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper,
no_wait=no_wait)
enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
instance.addon_profiles[virtual_node_addon_name].enabled)
need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_pull_for_result:
if enable_monitoring:
_ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.begin_create_or_update(resource_group_name, name, instance))
if enable_monitoring:
cloud_name = cmd.cli_ctx.cloud.name
# MDM metrics are supported only in the Azure public cloud, so add the role assignment only in that cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(
result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
# All agent pools reside in the same vnet; we grant the vnet-level Contributor
# role in a later function, so using an arbitrary agent pool here is OK.
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
_add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
# Otherwise the cluster is not using a custom VNet; the permission is already
# granted in the AKS RP, so we don't need to handle it on the client side.
else:
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, name, instance)
return result
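# Example invocation (names hypothetical): enabling monitoring with an explicit
# Log Analytics workspace:
#
#     az aks enable-addons --addons monitoring -n MyManagedCluster -g MyResourceGroup \
#         --workspace-resource-id /subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.OperationalInsights/workspaces/<ws>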
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name)
# Check if the KUBECONFIG environment variable is set.
# If the path differs from the default, -f/--file was passed,
# in which case we ignore the KUBECONFIG variable.
if "KUBECONFIG" in os.environ and path == os.path.join(os.path.expanduser('~'), '.kube', 'config'):
path = os.environ["KUBECONFIG"]
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
if bool(reset_service_principal) == bool(reset_aad):
raise CLIError(
'usage error: --reset-service-principal | --reset-aad')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError(
'usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=service_principal, secret=client_secret
)
return sdk_no_wait(no_wait,
client.begin_reset_service_principal_profile,
resource_group_name,
name, service_principal_profile)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.begin_reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
raise CLIError('There is more than one node pool in the cluster. '
'Please specify the nodepool name, or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
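# Example invocation (names hypothetical):
#
#     az aks scale -g MyResourceGroup -n MyManagedCluster --node-count 5 --nodepool-name nodepool1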
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None,
uptime_sla=False,
no_uptime_sla=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
windows_admin_password=None,
enable_managed_identity=False,
assign_identity=None,
yes=False,
no_wait=False,
enable_azure_rbac=False,
disable_azure_rbac=False):
ManagedClusterSKU = cmd.get_models('ManagedClusterSKU',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterAADProfile = cmd.get_models('ManagedClusterAADProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterIdentity = cmd.get_models('ManagedClusterIdentity',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties = cmd.get_models(
'Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (
aad_tenant_id is None and aad_admin_group_object_ids is None and
not enable_azure_rbac and not disable_azure_rbac)
# pylint: disable=too-many-boolean-expressions
if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
not update_lb_profile and
not attach_acr and
not detach_acr and
not uptime_sla and
not no_uptime_sla and
api_server_authorized_ip_ranges is None and
not enable_aad and
not update_aad_profile and
not enable_ahub and
not disable_ahub and
not windows_admin_password and
not enable_managed_identity and
not assign_identity):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--load-balancer-managed-outbound-ip-count" or'
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or'
'"--load-balancer-outbound-ports" or'
'"--load-balancer-idle-timeout" or'
'"--attach-acr" or "--detach-acr" or'
'"--uptime-sla" or'
'"--no-uptime-sla" or '
'"--api-server-authorized-ip-ranges" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids" or '
'"--enable-ahub" or '
'"--disable-ahub" or '
'"--windows-admin-password" or '
'"--enable-managed-identity" or '
'"--assign-identity" or '
'"--enable-azure-rbac" or '
'"--disable-azure-rbac"')
if not enable_managed_identity and assign_identity:
raise CLIError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
'to update per-node-pool autoscaler settings.')
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
'Please run "az aks update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable the cluster autoscaler with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning(
'Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if the intention is to clear the autoscaler profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# otherwise a profile was provided; update the instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
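# Illustration of the merge above (values hypothetical): a profile passed as
# {"scan-interval": "30s"} has its key normalized via key.replace("-", "_"),
# so dashes in CLI keys map onto the SDK model's snake_case attributes:
#
#     merged = _update_dict(instance.auto_scaler_profile.__dict__,
#                           {"scan_interval": "30s"})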
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if _is_msi_cluster(instance):
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
'<CLUSTER_NAME>-agentpool in the MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
if uptime_sla and no_uptime_sla:
raise CLIError(
'Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if no_uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Free"
)
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
cmd,
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
# an empty string is valid, as it disables IP whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(
cmd,
api_server_authorized_ip_ranges, instance=instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError(
'Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids/"'
'"--enable-azure-rbac/--disable-azure-rbac"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
# ids -> i_ds due to track 2 naming issue
instance.aad_profile.admin_group_object_i_ds = _parse_comma_separated_list(
aad_admin_group_object_ids)
if enable_azure_rbac and disable_azure_rbac:
raise MutuallyExclusiveArgumentError(
'Cannot specify "--enable-azure-rbac" and "--disable-azure-rbac" at the same time')
if enable_azure_rbac:
instance.aad_profile.enable_azure_rbac = True
if disable_azure_rbac:
instance.aad_profile.enable_azure_rbac = False
if enable_ahub and disable_ahub:
raise CLIError(
'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
if enable_ahub:
instance.windows_profile.license_type = 'Windows_Server'
if disable_ahub:
instance.windows_profile.license_type = 'None'
if windows_admin_password:
instance.windows_profile.admin_password = windows_admin_password
current_identity_type = "spn"
if instance.identity is not None:
current_identity_type = instance.identity.type.casefold()
goal_identity_type = current_identity_type
if enable_managed_identity:
if not assign_identity:
goal_identity_type = "systemassigned"
else:
goal_identity_type = "userassigned"
if current_identity_type != goal_identity_type:
msg = ""
if current_identity_type == "spn":
msg = ('Your cluster is using a service principal, and you are going to update '
'the cluster to use {} managed identity.\nAfter updating, your '
'cluster\'s control plane and addon pods will switch to using managed '
'identity, but kubelet will KEEP USING THE SERVICE PRINCIPAL '
'until you upgrade your agentpool.\n'
'Are you sure you want to perform this operation?').format(goal_identity_type)
else:
msg = ('Your cluster is already using {} managed identity, and you are going to '
'update the cluster to use {} managed identity. \nAre you sure you want to '
'perform this operation?').format(current_identity_type, goal_identity_type)
if not yes and not prompt_y_n(msg, default="n"):
return None
if goal_identity_type == "systemassigned":
instance.identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif goal_identity_type == "userassigned":
# pylint: disable=line-too-long
user_assigned_identity = {
assign_identity: Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties()
}
instance.identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
monitoring_addon_enabled = False
ingress_appgw_addon_enabled = False
virtual_node_addon_enabled = False
if instance.addon_profiles is not None:
monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
virtual_node_addon_enabled = CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux' in instance.addon_profiles and \
instance.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME +
'Linux'].enabled
return _put_managed_cluster_ensuring_permission(
cmd,
client,
subscription_id,
resource_group_name,
name,
instance,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
False,
instance.agent_pool_profiles[0].vnet_subnet_id,
_is_msi_cluster(instance),
attach_acr,
None,
no_wait)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
client,
resource_group_name, name,
kubernetes_version='',
control_plane_only=False,
node_image_only=False,
no_wait=False,
yes=False):
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
'If you only want to upgrade the node image version, please use the "--node-image-only" option.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster" \
"and might take a while, do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
# This only provides a client-side convenience so customers can run "az aks upgrade" to upgrade all
# nodepools of a cluster. The SDK only supports upgrading a single nodepool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
raise CLIError('This cluster is not using VirtualMachineScaleSets. The node-image-only upgrade operation '
'can only be applied to VirtualMachineScaleSets clusters.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(True, agent_pool_client,
resource_group_name, name, agent_pool_profile.name)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# For legacy clusters, we always upgrade node pools together with the control plane (CCP).
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
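# Example invocations (names and versions hypothetical):
#
#     az aks upgrade -g MyResourceGroup -n MyManagedCluster --kubernetes-version 1.19.7
#     az aks upgrade -g MyResourceGroup -n MyManagedCluster --node-image-only
#     az aks upgrade -g MyResourceGroup -n MyManagedCluster --kubernetes-version 1.19.7 --control-plane-only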
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
return sdk_no_wait(
no_wait,
client.begin_upgrade_node_image_version,
resource_group_name,
cluster_name,
nodepool_name,
)
def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not command_string:
raise ValidationError('Command cannot be empty.')
RunCommandRequest = cmd.get_models('RunCommandRequest', resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
request_payload = RunCommandRequest(command=command_string)
request_payload.context = _get_command_context(command_files)
# If this cluster has Azure AD enabled, we should pass the user token
# so that command execution also uses the current user's identity.
# Here we acquire a token for the AKS managed server AppID (same ID for all clouds).
if mc.aad_profile is not None and mc.aad_profile.managed:
request_payload.cluster_token = _get_dataplane_aad_token(
cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630")
commandResultFuture = client.begin_run_command(
resource_group_name, name, request_payload, polling_interval=5, retry_total=0)
return _print_command_result(cmd.cli_ctx, commandResultFuture.result(300))
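# Example invocation (assuming the "az aks command invoke" entry point routes
# here; names hypothetical):
#
#     az aks command invoke -g MyResourceGroup -n MyManagedCluster \
#         --command "kubectl get pods -A"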
def aks_command_result(cmd, client, resource_group_name, name, command_id=""):
if not command_id:
raise ValidationError('CommandID cannot be empty.')
commandResult = client.get_command_result(
resource_group_name, name, command_id)
return _print_command_result(cmd.cli_ctx, commandResult)
def _print_command_result(cli_ctx, commandResult):
# cli_ctx.data['safe_params'] contains the list of parameter names the user typed in, without values.
# The CLI core also uses this to calculate the ParameterSetName header for all HTTP requests from the CLI.
if (cli_ctx.data['safe_params'] is None or
"-o" in cli_ctx.data['safe_params'] or
"--output" in cli_ctx.data['safe_params']):
# the user specified an output format; honor their choice and return the object to the render pipeline
return commandResult
# the user didn't specify any format; we can customize the output for the best experience
if commandResult.provisioning_state == "Succeeded":
# succeeded: print the exit code and logs
print(
f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, "
f"finished at {commandResult.finished_at} "
f"with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}")
print(commandResult.logs)
return
if commandResult.provisioning_state == "Failed":
# failed, print reason in error
print(
f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}")
return
# otherwise the command is still in an in-progress (*-ing) state
print(f"{colorama.Fore.BLUE}command is in : {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}")
return None
def _get_command_context(command_files):
if not command_files:
return ""
filesToAttach = {}
# '.' means attach the current folder; it cannot be combined with other files (at least for now)
if len(command_files) == 1 and command_files[0] == ".":
# current folder
cwd = os.getcwd()
for filefolder, _, files in os.walk(cwd):
for file in files:
# retain folder structure
rel = os.path.relpath(filefolder, cwd)
filesToAttach[os.path.join(
filefolder, file)] = os.path.join(rel, file)
else:
for file in command_files:
if file == ".":
raise ValidationError(
". is used to attach current folder, not expecting other attachements.")
if os.path.isfile(file):
# for individually attached files, flatten them into the same folder
filesToAttach[file] = os.path.basename(file)
else:
raise ValidationError(
f"{file} is not valid file, or not accessable.")
if len(filesToAttach) < 1:
logger.debug("no files to attach!")
return ""
zipStream = io.BytesIO()
zipFile = zipfile.ZipFile(zipStream, "w")
for _, (osfile, zipEntry) in enumerate(filesToAttach.items()):
zipFile.write(osfile, zipEntry)
# zipFile.printdir()  # uncomment to debug
zipFile.close()
return str(base64.encodebytes(zipStream.getbuffer()), "utf-8")
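# The payload is a base64-encoded zip. A hedged sketch of how a consumer could
# decode it (standard library only; "payload" is the string returned above):
#
#     raw = base64.decodebytes(payload.encode("utf-8"))
#     with zipfile.ZipFile(io.BytesIO(raw)) as zf:
#         print(zf.namelist())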
def _get_dataplane_aad_token(cli_ctx, serverAppId):
# this function is mostly copied from keyvault cli
import adal
try:
return Profile(cli_ctx=cli_ctx).get_raw_token(resource=serverAppId)[0][2].get('accessToken')
except adal.AdalError as err:
# pylint: disable=no-member
if (hasattr(err, 'error_response') and
('error_description' in err.error_response) and
('AADSTS70008:' in err.error_response['error_description'])):
raise CLIError(
"Credentials have expired due to inactivity. Please run 'az login'")
raise CLIError(err)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
:param endpoint_type: The endpoint type to be used for a Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(
DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(
name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError(
"Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(
DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(
name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
no_wait=False):
ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only Linux is supported for now; in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError(
'The aci-connector addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise ValidationError('The confcom addon is already enabled for this managed cluster.',
recommendation='To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Add the installed extension to the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None,
aci_subnet_name=None,
vnet_subnet_id=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False):
ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use the default workspace if it exists, else create a default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('azure-policy')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
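# Illustration (constants as defined elsewhere in this module): passing
# --enable-addons monitoring,azure-policy yields an addon_profiles dict with
# CONST_MONITORING_ADDON_NAME (its config pointing at the workspace resource ID)
# and CONST_AZURE_POLICY_ADDON_NAME, both with enabled=True.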
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
except Exception: # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error(
"Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (
ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2",
"brazilsouth": "CQ",
"brazilsoutheast": "BRSE",
"norwayeast": "NOE",
"southafricanorth": "JNB",
"northcentralus": "NCUS",
"uaenorth": "DXB",
"germanywestcentral": "DEWC",
"ukwest": "WUK",
"switzerlandnorth": "CHN",
"switzerlandwest": "CHW",
"uaecentral": "AUH"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "brazilsouth",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "northcentralus",
"northeurope": "northeurope",
"southafricanorth": "southafricanorth",
"southafricawest": "southafricanorth",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "ukwest",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2",
"norwayeast": "norwayeast",
"norwaywest": "norwayeast",
"switzerlandnorth": "switzerlandnorth",
"switzerlandwest": "switzerlandwest",
"uaenorth": "uaenorth",
"germanywestcentral": "germanywestcentral",
"germanynorth": "germanywestcentral",
"uaecentral": "uaecentral",
"eastus2euap": "eastus2euap",
"brazilsoutheast": "brazilsoutheast"
}
# mapping for azure china cloud
# currently Log Analytics is supported only in the China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
# mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV",
"usgovarizona": "PHX"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia",
"usgovtexas": "usgovvirginia",
"usgovarizona": "usgovarizona"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(
rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(
workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(
rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(
workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(
rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(
workspace_region, "USGV")
else:
workspace_region = rg_location
workspace_region_code = rg_location.upper()
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(
subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id,
default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(
default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
# TODO: track2/replace create_or_update with begin_create_or_update, depends on 'azure.mgmt.resource.resources'
resource_groups.create_or_update(default_workspace_resource_group, {
'location': workspace_region})
GenericResource = cmd.get_models(
'GenericResource', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
generic_resource = GenericResource(location=workspace_region, properties={
'sku': {'name': 'standalone'}})
async_poller = resources.begin_create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
generic_resource)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
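# Worked example (subscription ID hypothetical): for an RG in westus2 on the
# public cloud, workspace_region_code is "WUS2", so the default workspace ID is
#
#     /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/DefaultResourceGroup-WUS2/providers/Microsoft.OperationalInsights/workspaces/DefaultWorkspace-00000000-0000-0000-0000-000000000000-WUS2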
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
for key in list(addon.config):
if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
key)
workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError(
'Could not locate resource group in workspace-resource-id URL.')
# the workspace region can differ from the RG region, so look up the location from workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(
workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(
unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
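# Sketch of the two accepted inputs (names are hypothetical): _ensure_aks_acr
# takes either a bare registry name or a full resource ID, e.g.
#   _ensure_aks_acr(cli_ctx, client_id, 'myregistry', subscription_id)
#   _ensure_aks_acr(cli_ctx, client_id,
#                   '/subscriptions/0000/resourceGroups/rg/providers/'
#                   'Microsoft.ContainerRegistry/registries/myregistry',
#                   subscription_id)
# Both paths end in the same 'acrpull' role assignment on the registry.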
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
tags=None,
labels=None,
max_surge=None,
mode="User",
enable_encryption_at_host=False,
enable_ultra_ssd=False,
no_wait=False):
AgentPool = cmd.get_models('AgentPool',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='agent_pools')
AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='agent_pools')
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
    taints_array = []
    if node_taints is not None:
        for taint in node_taints.split(','):
            taint = taint.strip()
            # Every taint must carry an effect after a colon, e.g. key=value:NoSchedule.
            if ':' not in taint:
                raise CLIError(
                    'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
            taints_array.append(taint)
if node_vm_size is None:
if os_type.lower() == "windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
scale_set_priority=priority,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
upgrade_settings=upgradeSettings,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
mode=mode
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
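            # In Azure, a spot max price of -1 means "cap at the current on-demand price".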
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
agent_pool,
)
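# Example invocation (resource names are hypothetical), as wired up through the
# CLI command table:
#   az aks nodepool add --resource-group my-rg --cluster-name my-aks \
#       --name spotpool --priority Spot --eviction-policy Delete --node-count 3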
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
instance,
)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
nodepool_name,
kubernetes_version='',
node_image_only=False,
max_surge=None,
no_wait=False):
    AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
                                              resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                              operation_group='agent_pools')
    if kubernetes_version != '' and node_image_only:
        raise CLIError(
            'Conflicting flags. Upgrading the Kubernetes version will also '
            'upgrade the node image version. If you only want to upgrade the '
            'node image version, please use "--node-image-only".'
        )
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
instance,
)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
tags=None,
max_surge=None,
mode=None,
no_wait=False):
AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='agent_pools')
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
if update_autoscaler > 1:
raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
if (update_autoscaler == 0 and not tags and not mode and not max_surge):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning(
'Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
instance,
)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
aad_session_key = None
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
else:
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
salt, fqdn_subdomain, location)
service_principal, aad_session_key = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this subscription?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
'aad_session_key': aad_session_key,
}
def _ensure_osa_aad(cmd,
cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
OpenShiftManagedClusterAADIdentityProvider = cmd.get_models('OpenShiftManagedClusterAADIdentityProvider',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is set temporarily, since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(
identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result, _aad_session_key = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[
app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
service_principal, _aad_session_key = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this subscription?')
logger.info('Created a service principal: %s', service_principal)
        # add the role assignment first, before saving it
        if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
            logger.warning('Could not create a role assignment for the service principal. '
                           'Are you an Owner on this subscription?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
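# For illustration: binascii.b2a_hex(os.urandom(10)) produces 20 hex characters,
# so the generated secret is always 21 characters ending in '$', e.g.
# '0f1e2d3c4b5a69788796$' (hypothetical value).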
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
    # Just do the get; we don't need the result, and it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError(
                'Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError(
                'min-count and max-count are only allowed together with --enable-cluster-autoscaler; please use that flag')
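# A few examples of how the checks above combine (counts are arbitrary):
#   enable_cluster_autoscaler=True,  min=1, max=5, node=3 -> accepted, autoscaling on
#   enable_cluster_autoscaler=True,  min=5, max=1, node=3 -> rejected, min-count > max-count
#   enable_cluster_autoscaler=True,  min=1, max=5, node=9 -> rejected, node-count out of range
#   enable_cluster_autoscaler=False, min=1, max=5         -> rejected, counts without the flag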
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
    Validates the min and max node counts when performing an autoscaler update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
        raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
                       ' The available locations are: {}.'.format(', '.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
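# Illustration with a hypothetical ID: _format_workspace_id turns
#   'subscriptions/0000/resourcegroups/rg/providers/'
#   'microsoft.operationalinsights/workspaces/ws/'
# into the canonical '/subscriptions/.../workspaces/ws' form by adding the
# missing leading slash and stripping the trailing one.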
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
OpenShiftManagedClusterAgentPoolProfile = cmd.get_models('OpenShiftManagedClusterAgentPoolProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftAgentPoolProfileRole = cmd.get_models('OpenShiftAgentPoolProfileRole',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedClusterIdentityProvider = cmd.get_models('OpenShiftManagedClusterIdentityProvider',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedCluster = cmd.get_models('OpenShiftManagedCluster',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftRouterProfile = cmd.get_models('OpenShiftRouterProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
NetworkProfile = cmd.get_models('NetworkProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedClusterAuthProfile = cmd.get_models('OpenShiftManagedClusterAuthProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
        count=3,
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
        count=3,
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Validate that the cluster does not already exist, since AAD rotation on OSA is not supported for now
try:
client.get(resource_group_name, name)
except CloudError:
        # If none of aad_client_app_id, aad_client_app_secret and aad_tenant_id are set, create a new AAD app
        if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd,
cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(
identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(
enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(
vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd,
cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError(
'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError(
'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
    idx = 0
    for i, agent_pool_profile in enumerate(instance.agent_pool_profiles):
        if agent_pool_profile.name.lower() == "compute":
            idx = i
            break
    instance.agent_pool_profiles[idx].count = int(
        compute_count)  # pylint: disable=no-member
    # null out the AAD profile and manually set the master pool name, because otherwise validation complains
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(
enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(
enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or
managed_cluster.identity.type.casefold() == "userassigned"))
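# Example: a cluster whose identity.type is 'SystemAssigned' or 'UserAssigned'
# (in any casing, hence the casefold() calls) is treated as MSI-based; clusters
# using a service principal carry no such identity and return False.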
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
    # some addons require role assignment after cluster creation
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
        # add the cluster SPN/MSI "Monitoring Metrics Publisher" role assignment to publish metrics to MDM
        # MDM metrics are supported only in the Azure public cloud, so add the role assignment only in that cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
_add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if need_grant_vnet_permission_to_cluster_identity:
if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
cluster.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster was created successfully, but we failed to attach '
                               'the ACR to it. You can manually grant the identity named '
                               '<CLUSTER_NAME>-agentpool in the MC_ resource group '
                               'permission to pull from the ACR.')
else:
kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
else:
cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
return cluster
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise UnauthorizedError('Could not grant Managed Identity Operator '
'permission to cluster identity at scope {}'.format(scope))